vfs_bio.c revision 31561
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.137 1997/12/02 21:06:45 phk Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.
 * It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated assuming all filesystem blocks
 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for an 8K filesystem.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Reduce the likelihood of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 6 + 20;
	lodirtybuffers = nbuf / 12 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}
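/*
 * Illustrative arithmetic for the limits computed above (the nbuf value
 * below is a made-up example, and DFLTBSIZE is assumed to be 8K):
 *
 *	nbuf = 1024
 *	maxbufspace       = (1024 + 8) * 8192   = ~8.1 MB
 *	maxvmiobufspace   = 2 * maxbufspace / 3 = ~5.4 MB
 *	maxbufmallocspace = maxbufspace / 20    = ~413 KB
 *	hidirtybuffers    = 1024 / 6 + 20       = 190
 *	lodirtybuffers    = 1024 / 12 + 10      = 95
 *	lofreebuffers     = 1024 / 18 + 5       = 61
 *	hifreebuffers     = 2 * 61              = 122
 */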
/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
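/*
 * Typical use of bread() by a filesystem (a minimal sketch, not code
 * from this file; "vp", "lbn" and "fs_bsize" stand for whatever the
 * caller has at hand).  The buffer is returned busy and must eventually
 * be released with brelse()/bqrelse(), or written back:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine or modify bp->b_data ...
 *	brelse(bp);
 */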
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}
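/*
 * The write entry points (bwrite above, bdwrite/bawrite below) differ
 * only in when the I/O happens and who releases the buffer (an
 * illustrative summary; "bp" is a buffer the caller obtained busy from
 * getblk() or bread()):
 *
 *	bwrite(bp);	synchronous; sleeps until the I/O completes
 *			and returns the error status.
 *	bdwrite(bp);	delayed; marks the buffer B_DELWRI and requeues
 *			it, the data reaches disk at a later sync.
 *	bawrite(bp);	asynchronous; starts the write immediately but
 *			does not wait, biodone() releases the buffer.
 */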
/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 *     layer can deal with ordered
	 *     writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a
	 * VMIO buffer constituted, so the B_INVAL flag is used to
	 * *invalidate* the buffer, but the VM object is kept around.
	 * The B_NOCACHE flag is used to invalidate the pages in the
	 * VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, quickly requeueing it without invalidation or
 * VMIO rundown.  Used for buffers whose contents remain valid.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
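/*
 * Choosing a release path (an illustrative sketch, not code from this
 * file): callers that have left the contents intact prefer bqrelse(),
 * everything else goes through brelse(), which handles invalidation
 * and VMIO rundown:
 *
 *	if (error) {
 *		bp->b_flags |= B_INVAL;		(discard bad contents)
 *		brelse(bp);
 *	} else {
 *		bqrelse(bp);			(keep contents cached)
 *	}
 */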
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
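/*
 * Worked example of the contiguity test above (illustrative numbers
 * only): with f_iosize = 8192 and DEV_BSIZE = 512 (DEV_BSHIFT = 9),
 * each filesystem block spans 8192 >> 9 = 16 device sectors, so the
 * i-th candidate must sit at
 *
 *	bpa->b_blkno == bp->b_blkno + ((i * 8192) >> 9)
 *		     == bp->b_blkno + i * 16
 *
 * and a MAXPHYS of, say, 64K would allow clusters of at most
 * 64K / 8K = 8 blocks.
 */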
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
			    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist))
				if (bp1->b_kvasize != 0) {
					bremfree(bp1);
					bfreekva(bp1);
					brelse(bp1);
					goto findkvaspace;
				}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we are below our allocated minimum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			splx(s);
			vfs_bio_awrite(bp);
			s = splbio();
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}
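/*
 * incore() vs. inmem() (an illustrative sketch, not code from this
 * file): incore() above only finds a buffer header, while inmem()
 * (below) also reports data that survives in VM pages after the
 * header was reclaimed:
 *
 *	if (incore(vp, blkno))
 *		... a buffer header still holds the block ...
 *	else if (inmem(vp, blkno))
 *		... no header, but the VM object has valid pages, so
 *		    re-constituting the buffer needs no disk I/O ...
 */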
/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024 byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
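/*
 * Typical getblk() use when the caller intends to overwrite the whole
 * block and does not care about its previous contents (an illustrative
 * sketch; "vp", "lbn" and "fs_bsize" are the caller's):
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, fs_bsize, 0, 0);
 *	vfs_bio_clrbuf(bp);		(zero the parts not already valid)
 *	... fill in bp->b_data ...
 *	bdwrite(bp);			(schedule a delayed write)
 */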
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		tsleep(bp, curproc->p_usrpri, "biowait", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
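/*
 * The synchronous I/O pattern used throughout this file (an
 * illustrative sketch): queue the I/O, then sleep until biodone()
 * sets B_DONE, and check the error status:
 *
 *	bp->b_flags |= B_READ;
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */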
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VVMIO) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary,
	 * so no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
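/*
 * An async consumer can have biodone() call it back instead of doing
 * the default release (an illustrative sketch; "my_iodone" is a
 * hypothetical handler, not part of this file):
 *
 *	static void
 *	my_iodone(struct buf *bp)
 *	{
 *		... inspect bp->b_flags & B_ERROR, finish bookkeeping ...
 *		brelse(bp);	(the handler now owns the release)
 *	}
 *
 *	bp->b_iodone = my_iodone;
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	VOP_STRATEGY(bp);
 */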
/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
				     (vm_offset_t) ((foff + off) & PAGE_MASK),
				     size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
			    (vm_offset_t) (soff & PAGE_MASK),
			    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
				       (vm_offset_t) (soff & PAGE_MASK),
				       (vm_offset_t) (eoff - soff));
}
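/*
 * How the busy/unbusy routines pair up around an I/O (an illustrative
 * sketch of the protocol that bread()/bwrite() above follow, using
 * vfs_busy_pages() defined below):
 *
 *	vfs_busy_pages(bp, 0);		(before issuing a read)
 *	VOP_STRATEGY(bp);
 *	...
 *	biodone(bp);			(on completion, un-busies the pages)
 * or
 *	vfs_unbusy_pages(bp);		(on an aborted/incomplete I/O)
 */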
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
				VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
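/*
 * Example use of the DDB command above (an illustrative sketch; the
 * address is whatever buffer header you are inspecting):
 *
 *	db> show buffer 0xf06b2a00
 *
 * which decodes b_flags symbolically and lists the buffer's pages.
 */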