vfs_bio.c revision 38862
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.174 1998/09/04 08:06:55 dfr Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

#if 0	/* replaced by sched_sync */
static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
#endif

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);
int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = {0};

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * being 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Reduce the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer.
 * Must be called only at splbio or higher,
 * as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (biowait(bp));
	}
	return (0);
}
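/*
 * A minimal usage sketch (illustrative, not part of this file): a typical
 * filesystem read path obtains a block with bread() and must release it
 * with brelse() on both the success and error paths.  The vnode vp,
 * logical block lbn, and block size bsize below are hypothetical.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_data, dest, bsize);
 *	brelse(bp);
 */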
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct vnode *vp;
	struct mount *mp;


	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0)
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	s = splbio();
	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if ((oldflags & B_ASYNC) == 0)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);
		brelse(bp);
		return (rtval);
	}
	return (0);
}

__inline void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}
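/*
 * For illustration: the write interfaces below differ only in when the
 * data reaches disk and whether the caller blocks.  A sketch of the
 * trade-offs, assuming a busy buffer bp obtained from getblk():
 *
 *	bwrite(bp);	synchronous; sleeps in biowait() unless the
 *			caller pre-set B_ASYNC
 *	bdwrite(bp);	delayed; marks B_DELWRI and requeues, the data
 *			goes out on a later sync or flush
 *	bawrite(bp);	asynchronous; sets B_ASYNC and calls VOP_BWRITE(),
 *			the buffer is released in biodone()
 */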
/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{
	struct vnode *vp;

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then.  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * XXX The soft dependency code is not prepared to
	 * have I/O done when a bdwrite is requested.  For
	 * now we just let the write be delayed if it is
	 * requested by the soft dependency code.
	 */
	if ((vp = bp->b_vp) &&
	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
	      (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
	     (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
		return;

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}


/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Check how this compares with vfs_setdirty(); XXX [JRE]
 */
void
bdirty(bp)
	struct buf *bp;
{

	bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 * layer can deal with ordered
	 * writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}
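/*
 * Note on the two release paths that follow: brelse() is the heavyweight
 * release -- it handles invalidation, VMIO page rundown, and requeueing
 * onto any of the free queues -- while bqrelse() is a cheap requeue for
 * buffers whose contents are known to remain valid (e.g. after a delayed
 * write).
 */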
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
				    (PAGE_SIZE - poffset) : resid;
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
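/*
 * Release the VM pages backing a VMIO buffer: unwire each page and, for
 * synchronous frees, cache, deactivate, or free it depending on its dirty
 * and hold state; then drop the buffer's mapping and space accounting.
 */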
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);

		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			/*
			 * If this is an async free, we cannot place pages
			 * onto the cache queue, and we don't modify any
			 * queues at all.  This is probably in error (for
			 * perf reasons), and we will eventually need to
			 * build a more complete infrastructure to support
			 * I/O rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
					vm_page_flag_clear(m, PG_ZERO);
				} else if (m->hold_count == 0) {
					vm_page_busy(m);
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
				vm_page_flag_clear(m, PG_ZERO);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			     (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}

	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
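/*
 * For illustration (assuming the usual 512-byte DEV_BSIZE): the cluster
 * scan above accepts logically consecutive delayed-write buffers only
 * while they are also physically consecutive.  With an 8K block size,
 * (i * size) >> DEV_BSHIFT is 16 disk sectors per block, so buffer
 * lblkno+1 qualifies only if its b_blkno equals bp->b_blkno + 16, or if
 * it is still unmapped (b_blkno == b_lblkno).
 */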
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
	int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		/*
		 * If our delayed write is likely to be used soon, then
		 * recycle back onto the LRU queue.
		 */
		if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
		    (bp->b_lblkno >= blkno) && (maxsize > 0)) {

			if (bp->b_usecount > 0) {
				if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {

					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);

					if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
						TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
						bp->b_usecount--;
						goto start;
					}
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
			}
		}

		/*
		 * Certain layered filesystems can recursively re-enter the vfs_bio
		 * code, due to delayed writes.  This helps keep the system from
		 * deadlocking.
		 */
		if (writerecursion > 0) {
			if (writerecursion > 5) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
				if (bp == NULL) {
					bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
					while (bp) {
						if ((bp->b_flags & B_DELWRI) == 0)
							break;
						bp = TAILQ_NEXT(bp, b_freelist);
					}
				}
				if (bp == NULL)
					panic("getnewbuf: cannot get buffer, infinite recursion failure");
			} else {
				bremfree(bp);
				bp->b_flags |= B_BUSY | B_AGE | B_ASYNC;
				nbyteswritten += bp->b_bufsize;
				++writerecursion;
				VOP_BWRITE(bp);
				--writerecursion;
				if (!slpflag && !slptimeo) {
					return (0);
				}
				goto start;
			}
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL &&
	    bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_offset = NOOFFSET;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 5;
	/* Here, not kern_physio.c, is where this should be done */
	LIST_INIT(&bp->b_dep);

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
		    vm_map_min(buffer_map), maxsize, &addr)) {
			if (kvafreespace > 0) {
				int totfree = 0, freed;
				do {
					freed = 0;
					for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
					    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
						if (bp1->b_kvasize != 0) {
							totfree += bp1->b_kvasize;
							freed = bp1->b_kvasize;
							bremfree(bp1);
							bfreekva(bp1);
							brelse(bp1);
							break;
						}
					}
				} while (freed);
				/*
				 * if we found free space, then retry with the same buffer.
				 */
				if (totfree)
					goto findkvaspace;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have exceeded our allocated maximum.
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
		    addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			vfs_bio_awrite(bp);
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}
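/*
 * Worked example for the scan above (illustrative numbers, assuming 4K
 * pages): with an 8K f_iosize and blkno 3, inmem() checks file offsets
 * 24K and 28K -- i.e. pages OFF_TO_IDX(24K) = 6 and 7 -- and each page
 * must be valid for the full tinc = PAGE_SIZE range for the block to
 * count as resident.
 */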
/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_pages[i]);
		}

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int i, s;
	struct bufhashhdr *bh;
	int maxsize;
	int checksize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {

			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;

			if (!tsleep(bp,
			    (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
				goto loop;
			}

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write (if needed) and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					VOP_BWRITE(bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						VOP_BWRITE(bp);
					}
				}
				goto loop;
			}
		}

#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("getblk: no buffer offset");
#endif

		/*
		 * Check that the constituted buffer really deserves the
		 * B_CACHE bit to be set.  B_VMIO type buffers might not
		 * contain fully valid pages.  Normal (old-style) buffers
		 * should be fully valid.
		 */
		if (bp->b_flags & B_VMIO) {
			checksize = bp->b_bufsize;
			for (i = 0; i < bp->b_npages; i++) {
				int resid;
				int poffset;
				poffset = bp->b_offset & PAGE_MASK;
				resid = (checksize > (PAGE_SIZE - poffset)) ?
					(PAGE_SIZE - poffset) : checksize;
				if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
					bp->b_flags &= ~(B_CACHE | B_DONE);
					break;
				}
				checksize -= resid;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, blkno,
		    slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;

		if (vp->v_type != VBLK)
			bp->b_offset = (off_t) blkno * maxsize;
		else
			bp->b_offset = (off_t) blkno * DEV_BSIZE;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		splx(s);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0)
		;
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}
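/*
 * A minimal sketch of the create-and-write path (illustrative; vp, lbn,
 * and bsize are hypothetical): getblk() returns the buffer B_BUSY, and
 * if B_CACHE is clear the caller must fill b_data itself before writing.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	bzero(bp->b_data, bsize);
 *	... fill in the new contents ...
 *	bdwrite(bp);
 */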

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					vm_page_sleep(m, "biodep", &m->busy);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;
			int orig_validoff = bp->b_validoff;
			int orig_validend = bp->b_validend;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;

				off = bp->b_offset;
#ifdef DIAGNOSTIC
				if (bp->b_offset == NOOFFSET)
					panic("allocbuf: no buffer offset");
#endif

				curbpnpages = bp->b_npages;
doretry:
				bp->b_validoff = orig_validoff;
				bp->b_validend = orig_validend;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							vm_pageout_deficit += (desiredpages - bp->b_npages);
							goto doretry;
						}

						vm_page_wire(m);
						vm_page_flag_clear(m, PG_BUSY);
						bp->b_flags &= ~B_CACHE;

					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							vm_page_flag_set(m, PG_WANTED);
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						     (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_flag_clear(m, PG_ZERO);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		if (bp->b_flags & B_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, PRIBIO, "biowr", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
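/*
 * For illustration: callers that cannot sleep in biowait() typically
 * request a completion callback instead.  A hedged sketch, where
 * my_iodone is a hypothetical routine with b_iodone's calling
 * convention:
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = my_iodone;
 *	VOP_STRATEGY(vp, bp);
 *
 * biodone() below then clears B_CALL and hands the buffer to my_iodone()
 * rather than releasing it itself.
 */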
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if (bp->b_flags & B_FREEBUF) {
		brelse(bp);
		splx(s);
		return;
	}

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE) {
		vm_bounce_free(bp);
	}
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("biodone: no buffer offset");
#endif

#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					vm_object_pip_subtract(obj, 1);
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;

			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			vm_page_io_finish(m);
			vm_object_pip_subtract(obj, 1);
			foff += resid;
			iosize -= resid;
		}
		if (obj &&
		    (obj->paging_in_progress == 0) &&
		    (obj->flags & OBJ_PIPWNT)) {
			vm_object_clear_flag(obj, OBJ_PIPWNT);
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

static int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

#if 0	/* not with kirks code */
static int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

#endif


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			vm_object_pip_subtract(obj, 1);
			vm_page_flag_clear(m, PG_ZERO);
			vm_page_io_finish(m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			vm_object_clear_flag(obj, OBJ_PIPWNT);
			wakeup(obj);
		}
	}
}
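/*
 * Note on pairing: vfs_busy_pages() (below) is called before VOP_STRATEGY()
 * to mark the pages as undergoing paging I/O; biodone() undoes that on
 * normal completion, while vfs_unbusy_pages() above undoes it when the I/O
 * is abandoned before completion (e.g. a strategy-routine error path).
 */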
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
	    (vm_offset_t) ((foff + off) & PAGE_MASK),
	    size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		vm_page_set_invalid(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
		off = off - pageno * PAGE_SIZE;
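		/*
		 * Convert the buffer-relative b_validoff/b_validend byte
		 * offsets into file offsets within this page, rounded up to
		 * DEV_BSIZE boundaries, and clip the range marked valid and
		 * clean below to them.
		 */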
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		soff = qmax(sv, soff);
		eoff = qmin(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object's paging_in_progress
 * count is incremented to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_busy_pages: no buffer offset");
#endif

		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			if (vm_page_sleep(m, "vbpage", NULL))
				goto retry;
		}

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;
		foff = bp->b_offset;

#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_clean_pages: no buffer offset");
#endif

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];
			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}
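/*
 * Clear a buffer.  For VMIO buffers this avoids zeroing DEV_BSIZE
 * chunks that are already valid or already known to be zero-filled;
 * otherwise it falls back to clrbuf().
 */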
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    (bp->b_pages[0]->valid != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */