vfs_bio.c revision 34206
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.153 1998/03/04 03:17:30 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
		vm_offset_t off, vm_offset_t size, vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
		int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;
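
/*
 * Roughly how the bogus page is used (see vfs_busy_pages() and biodone()
 * below): when a buffer is sent down for a read but some of its pages are
 * already valid, the valid pages are temporarily replaced in the buffer's
 * page array,
 *
 *	bp->b_pages[i] = bogus_page;
 *	pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
 *
 * so the device scribbles on the throw-away page instead of on good data.
 * On completion the real pages are looked up again with vm_page_lookup()
 * and re-entered into the buffer's kva.
 */
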
static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");
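
/*
 * The counters and limits above are exported under the "vfs" sysctl tree,
 * so the read-write limits can be inspected and tuned from userland, for
 * example (the value shown is only illustrative; sensible settings depend
 * on nbuf):
 *
 *	sysctl vfs.numdirtybuffers
 *	sysctl -w vfs.hidirtybuffers=256
 */
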
static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = {0};

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_generation = 0;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
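
/*
 * Typical caller pattern for the cached read path above (an illustrative
 * sketch only; "vp", "lbn" and "bsize" stand for whatever vnode, logical
 * block number and block size the caller is working with):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	bqrelse(bp);	(or bdwrite(bp)/bwrite(bp) if the data was modified)
 */
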
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

inline void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}
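
/*
 * vfs_bio_need_satisfy() is the wakeup half of the "needsbuffer" handshake.
 * The sleeping half lives in getnewbuf() and waitfreebuffers() further down,
 * which record what they are short of and then sleep on &needsbuffer, e.g.:
 *
 *	needsbuffer |= VFS_BIO_NEED_ANY;
 *	tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
 *
 * When a buffer is released, the bits that have been satisfied are cleared
 * above and the sleepers are woken.
 */
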
/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync -- there might not be enough
	 * memory to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 *     layer can deal with ordered
	 *     writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}
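
/*
 * Informal summary of the write entry points above:
 *
 *	bwrite(bp)	- synchronous unless B_ASYNC was already set; waits
 *			  in biowait() and releases the buffer itself.
 *	bdwrite(bp)	- just marks the buffer B_DELWRI and requeues it; the
 *			  data goes to disk later via the update daemon,
 *			  flushdirtybuffers() or getnewbuf().
 *	bawrite(bp)	- sets B_ASYNC and issues the write; biodone()
 *			  releases the buffer on completion.
 *	bowrite(bp)	- like bwrite() but additionally sets B_ORDERED for
 *			  devices that can preserve write ordering.
 */
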
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
		(bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;
		int blksize;

		vp = bp->b_vp;

		if (vp->v_type == VBLK)
			blksize = DEV_BSIZE;
		else
			blksize = vp->v_mount->mnt_stat.f_iosize;

		resid = bp->b_bufsize;
		foff = -1LL;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;

				foff = (off_t) bp->b_lblkno * blksize;
				poff = OFF_TO_IDX(foff);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
				break;
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				if ((blksize & PAGE_MASK) == 0) {
					vm_page_set_invalid(m, 0, resid);
				} else {
					if (foff == -1LL)
						foff = (off_t) bp->b_lblkno * blksize;
					vm_page_set_invalid(m, (vm_offset_t) foff, resid);
				}
			}
			resid -= PAGE_SIZE;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;
		bp->b_generation++;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		bp->b_generation++;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
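
/*
 * For reference, the free-queue placement done at the end of brelse() above
 * (and consulted again by getnewbuf() when it hunts for a victim) is,
 * informally:
 *
 *	QUEUE_EMPTY	buffer headers with no memory attached
 *	QUEUE_AGE	invalid or aged contents; reclaimed first
 *	QUEUE_LRU	valid, potentially reusable contents
 *	QUEUE_LOCKED	B_LOCKED buffers, not reclaimable
 *	QUEUE_NONE	not on any free queue (e.g. B_BUSY)
 */
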
/*
 * Release a buffer.  Unlike brelse(), this is the quick-release path:
 * the buffer's contents are assumed to remain valid, so no VMIO page
 * rundown or invalidation is done and the buffer simply goes back onto
 * the locked or LRU queue.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);

		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

			/*
			 * In the case of sync buffer frees, we can do pretty much
			 * anything to any of the memory queues.  Specifically,
			 * the cache queue is okay to be modified.
			 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					m->flags |= PG_BUSY;
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
842 */ 843int 844vfs_bio_awrite(struct buf * bp) 845{ 846 int i; 847 daddr_t lblkno = bp->b_lblkno; 848 struct vnode *vp = bp->b_vp; 849 int s; 850 int ncl; 851 struct buf *bpa; 852 int nwritten; 853 int size; 854 int maxcl; 855 856 s = splbio(); 857 /* 858 * right now we support clustered writing only to regular files 859 */ 860 if ((vp->v_type == VREG) && 861 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 862 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 863 864 size = vp->v_mount->mnt_stat.f_iosize; 865 maxcl = MAXPHYS / size; 866 867 for (i = 1; i < maxcl; i++) { 868 if ((bpa = gbincore(vp, lblkno + i)) && 869 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == 870 (B_DELWRI | B_CLUSTEROK)) && 871 (bpa->b_bufsize == size)) { 872 if ((bpa->b_blkno == bpa->b_lblkno) || 873 (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 874 break; 875 } else { 876 break; 877 } 878 } 879 ncl = i; 880 /* 881 * this is a possible cluster write 882 */ 883 if (ncl != 1) { 884 nwritten = cluster_wbuild(vp, size, lblkno, ncl); 885 splx(s); 886 return nwritten; 887 } 888 } 889 890 bremfree(bp); 891 splx(s); 892 /* 893 * default (old) behavior, writing out only one block 894 */ 895 bp->b_flags |= B_BUSY | B_ASYNC; 896 nwritten = bp->b_bufsize; 897 (void) VOP_BWRITE(bp); 898 return nwritten; 899} 900 901 902/* 903 * Find a buffer header which is available for use. 904 */ 905static struct buf * 906getnewbuf(struct vnode *vp, daddr_t blkno, 907 int slpflag, int slptimeo, int size, int maxsize) 908{ 909 struct buf *bp, *bp1; 910 int nbyteswritten = 0; 911 vm_offset_t addr; 912 static int writerecursion = 0; 913 914start: 915 if (bufspace >= maxbufspace) 916 goto trytofreespace; 917 918 /* can we constitute a new buffer? */ 919 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) { 920#if !defined(MAX_PERF) 921 if (bp->b_qindex != QUEUE_EMPTY) 922 panic("getnewbuf: inconsistent EMPTY queue, qindex=%d", 923 bp->b_qindex); 924#endif 925 bp->b_flags |= B_BUSY; 926 bremfree(bp); 927 goto fillbuf; 928 } 929trytofreespace: 930 /* 931 * We keep the file I/O from hogging metadata I/O 932 * This is desirable because file data is cached in the 933 * VM/Buffer cache even if a buffer is freed. 934 */ 935 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) { 936#if !defined(MAX_PERF) 937 if (bp->b_qindex != QUEUE_AGE) 938 panic("getnewbuf: inconsistent AGE queue, qindex=%d", 939 bp->b_qindex); 940#endif 941 } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) { 942#if !defined(MAX_PERF) 943 if (bp->b_qindex != QUEUE_LRU) 944 panic("getnewbuf: inconsistent LRU queue, qindex=%d", 945 bp->b_qindex); 946#endif 947 } 948 if (!bp) { 949 /* wait for a free buffer of any kind */ 950 needsbuffer |= VFS_BIO_NEED_ANY; 951 do 952 tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf", 953 slptimeo); 954 while (needsbuffer & VFS_BIO_NEED_ANY); 955 return (0); 956 } 957 958#if defined(DIAGNOSTIC) 959 if (bp->b_flags & B_BUSY) { 960 panic("getnewbuf: busy buffer on free list\n"); 961 } 962#endif 963 964 /* 965 * We are fairly aggressive about freeing VMIO buffers, but since 966 * the buffering is intact without buffer headers, there is not 967 * much loss. We gain by maintaining non-VMIOed metadata in buffers. 
968 */ 969 if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) { 970 if ((bp->b_flags & B_VMIO) == 0 || 971 (vmiospace < maxvmiobufspace)) { 972 --bp->b_usecount; 973 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 974 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 975 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 976 goto start; 977 } 978 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 979 } 980 } 981 982 983 /* if we are a delayed write, convert to an async write */ 984 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 985 986 /* 987 * If our delayed write is likely to be used soon, then 988 * recycle back onto the LRU queue. 989 */ 990 if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) && 991 (bp->b_lblkno >= blkno) && (maxsize > 0)) { 992 993 if (bp->b_usecount > 0) { 994 if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) { 995 996 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 997 998 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 999 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1000 bp->b_usecount--; 1001 goto start; 1002 } 1003 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1004 } 1005 } 1006 } 1007 1008 /* 1009 * Certain layered filesystems can recursively re-enter the vfs_bio 1010 * code, due to delayed writes. This helps keep the system from 1011 * deadlocking. 1012 */ 1013 if (writerecursion > 0) { 1014 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1015 while (bp) { 1016 if ((bp->b_flags & B_DELWRI) == 0) 1017 break; 1018 bp = TAILQ_NEXT(bp, b_freelist); 1019 } 1020 if (bp == NULL) { 1021 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1022 while (bp) { 1023 if ((bp->b_flags & B_DELWRI) == 0) 1024 break; 1025 bp = TAILQ_NEXT(bp, b_freelist); 1026 } 1027 } 1028 if (bp == NULL) 1029 panic("getnewbuf: cannot get buffer, infinite recursion failure"); 1030 } else { 1031 ++writerecursion; 1032 nbyteswritten += vfs_bio_awrite(bp); 1033 --writerecursion; 1034 if (!slpflag && !slptimeo) { 1035 return (0); 1036 } 1037 goto start; 1038 } 1039 } 1040 1041 if (bp->b_flags & B_WANTED) { 1042 bp->b_flags &= ~B_WANTED; 1043 wakeup(bp); 1044 } 1045 bremfree(bp); 1046 bp->b_flags |= B_BUSY; 1047 1048 if (bp->b_flags & B_VMIO) { 1049 bp->b_flags &= ~B_ASYNC; 1050 vfs_vmio_release(bp); 1051 } 1052 1053 if (bp->b_vp) 1054 brelvp(bp); 1055 1056fillbuf: 1057 bp->b_generation++; 1058 1059 /* we are not free, nor do we contain interesting data */ 1060 if (bp->b_rcred != NOCRED) { 1061 crfree(bp->b_rcred); 1062 bp->b_rcred = NOCRED; 1063 } 1064 if (bp->b_wcred != NOCRED) { 1065 crfree(bp->b_wcred); 1066 bp->b_wcred = NOCRED; 1067 } 1068 1069 LIST_REMOVE(bp, b_hash); 1070 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1071 if (bp->b_bufsize) { 1072 allocbuf(bp, 0); 1073 } 1074 bp->b_flags = B_BUSY; 1075 bp->b_dev = NODEV; 1076 bp->b_vp = NULL; 1077 bp->b_blkno = bp->b_lblkno = 0; 1078 bp->b_iodone = 0; 1079 bp->b_error = 0; 1080 bp->b_resid = 0; 1081 bp->b_bcount = 0; 1082 bp->b_npages = 0; 1083 bp->b_dirtyoff = bp->b_dirtyend = 0; 1084 bp->b_validoff = bp->b_validend = 0; 1085 bp->b_usecount = 5; 1086 1087 maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK; 1088 1089 /* 1090 * we assume that buffer_map is not at address 0 1091 */ 1092 addr = 0; 1093 if (maxsize != bp->b_kvasize) { 1094 bfreekva(bp); 1095 1096findkvaspace: 1097 /* 1098 * See if we have buffer kva space 1099 */ 1100 if (vm_map_findspace(buffer_map, 1101 vm_map_min(buffer_map), maxsize, &addr)) { 1102 if (kvafreespace > 0) { 1103 int totfree = 0, freed; 1104 do { 1105 freed = 0; 1106 
for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 1107 bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) { 1108 if (bp1->b_kvasize != 0) { 1109 totfree += bp1->b_kvasize; 1110 freed = bp1->b_kvasize; 1111 bremfree(bp1); 1112 bfreekva(bp1); 1113 brelse(bp1); 1114 break; 1115 } 1116 } 1117 } while (freed); 1118 /* 1119 * if we found free space, then retry with the same buffer. 1120 */ 1121 if (totfree) 1122 goto findkvaspace; 1123 } 1124 bp->b_flags |= B_INVAL; 1125 brelse(bp); 1126 goto trytofreespace; 1127 } 1128 } 1129 1130 /* 1131 * See if we are below are allocated minimum 1132 */ 1133 if (bufspace >= (maxbufspace + nbyteswritten)) { 1134 bp->b_flags |= B_INVAL; 1135 brelse(bp); 1136 goto trytofreespace; 1137 } 1138 1139 /* 1140 * create a map entry for the buffer -- in essence 1141 * reserving the kva space. 1142 */ 1143 if (addr) { 1144 vm_map_insert(buffer_map, NULL, 0, 1145 addr, addr + maxsize, 1146 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 1147 1148 bp->b_kvabase = (caddr_t) addr; 1149 bp->b_kvasize = maxsize; 1150 } 1151 bp->b_data = bp->b_kvabase; 1152 1153 return (bp); 1154} 1155 1156static void 1157waitfreebuffers(int slpflag, int slptimeo) { 1158 while (numfreebuffers < hifreebuffers) { 1159 flushdirtybuffers(slpflag, slptimeo); 1160 if (numfreebuffers < hifreebuffers) 1161 break; 1162 needsbuffer |= VFS_BIO_NEED_FREE; 1163 if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo)) 1164 break; 1165 } 1166} 1167 1168static void 1169flushdirtybuffers(int slpflag, int slptimeo) { 1170 int s; 1171 static pid_t flushing = 0; 1172 1173 s = splbio(); 1174 1175 if (flushing) { 1176 if (flushing == curproc->p_pid) { 1177 splx(s); 1178 return; 1179 } 1180 while (flushing) { 1181 if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) { 1182 splx(s); 1183 return; 1184 } 1185 } 1186 } 1187 flushing = curproc->p_pid; 1188 1189 while (numdirtybuffers > lodirtybuffers) { 1190 struct buf *bp; 1191 needsbuffer |= VFS_BIO_NEED_LOWLIMIT; 1192 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1193 if (bp == NULL) 1194 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1195 1196 while (bp && ((bp->b_flags & B_DELWRI) == 0)) { 1197 bp = TAILQ_NEXT(bp, b_freelist); 1198 } 1199 1200 if (bp) { 1201 vfs_bio_awrite(bp); 1202 continue; 1203 } 1204 break; 1205 } 1206 1207 flushing = 0; 1208 wakeup(&flushing); 1209 splx(s); 1210} 1211 1212/* 1213 * Check to see if a block is currently memory resident. 1214 */ 1215struct buf * 1216incore(struct vnode * vp, daddr_t blkno) 1217{ 1218 struct buf *bp; 1219 1220 int s = splbio(); 1221 bp = gbincore(vp, blkno); 1222 splx(s); 1223 return (bp); 1224} 1225 1226/* 1227 * Returns true if no I/O is needed to access the 1228 * associated VM object. This is like incore except 1229 * it also hunts around in the VM system for the data. 
1230 */ 1231 1232int 1233inmem(struct vnode * vp, daddr_t blkno) 1234{ 1235 vm_object_t obj; 1236 vm_offset_t toff, tinc; 1237 vm_page_t m; 1238 vm_ooffset_t off; 1239 1240 if (incore(vp, blkno)) 1241 return 1; 1242 if (vp->v_mount == NULL) 1243 return 0; 1244 if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0) 1245 return 0; 1246 1247 obj = vp->v_object; 1248 tinc = PAGE_SIZE; 1249 if (tinc > vp->v_mount->mnt_stat.f_iosize) 1250 tinc = vp->v_mount->mnt_stat.f_iosize; 1251 off = blkno * vp->v_mount->mnt_stat.f_iosize; 1252 1253 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 1254 1255 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 1256 if (!m) 1257 return 0; 1258 if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0) 1259 return 0; 1260 } 1261 return 1; 1262} 1263 1264/* 1265 * now we set the dirty range for the buffer -- 1266 * for NFS -- if the file is mapped and pages have 1267 * been written to, let it know. We want the 1268 * entire range of the buffer to be marked dirty if 1269 * any of the pages have been written to for consistancy 1270 * with the b_validoff, b_validend set in the nfs write 1271 * code, and used by the nfs read code. 1272 */ 1273static void 1274vfs_setdirty(struct buf *bp) { 1275 int i; 1276 vm_object_t object; 1277 vm_offset_t boffset, offset; 1278 /* 1279 * We qualify the scan for modified pages on whether the 1280 * object has been flushed yet. The OBJ_WRITEABLE flag 1281 * is not cleared simply by protecting pages off. 1282 */ 1283 if ((bp->b_flags & B_VMIO) && 1284 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) { 1285 /* 1286 * test the pages to see if they have been modified directly 1287 * by users through the VM system. 1288 */ 1289 for (i = 0; i < bp->b_npages; i++) 1290 vm_page_test_dirty(bp->b_pages[i]); 1291 1292 /* 1293 * scan forwards for the first page modified 1294 */ 1295 for (i = 0; i < bp->b_npages; i++) { 1296 if (bp->b_pages[i]->dirty) { 1297 break; 1298 } 1299 } 1300 boffset = (i << PAGE_SHIFT); 1301 if (boffset < bp->b_dirtyoff) { 1302 bp->b_dirtyoff = boffset; 1303 } 1304 1305 /* 1306 * scan backwards for the last page modified 1307 */ 1308 for (i = bp->b_npages - 1; i >= 0; --i) { 1309 if (bp->b_pages[i]->dirty) { 1310 break; 1311 } 1312 } 1313 boffset = (i + 1); 1314 offset = boffset + bp->b_pages[0]->pindex; 1315 if (offset >= object->size) 1316 boffset = object->size - bp->b_pages[0]->pindex; 1317 if (bp->b_dirtyend < (boffset << PAGE_SHIFT)) 1318 bp->b_dirtyend = (boffset << PAGE_SHIFT); 1319 } 1320} 1321 1322/* 1323 * Get a block given a specified block and offset into a file/device. 1324 */ 1325struct buf * 1326getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1327{ 1328 struct buf *bp; 1329 int i, s; 1330 struct bufhashhdr *bh; 1331 int maxsize; 1332 int generation; 1333 1334 if (vp->v_mount) { 1335 maxsize = vp->v_mount->mnt_stat.f_iosize; 1336 /* 1337 * This happens on mount points. 
1338 */ 1339 if (maxsize < size) 1340 maxsize = size; 1341 } else { 1342 maxsize = size; 1343 } 1344 1345#if !defined(MAX_PERF) 1346 if (size > MAXBSIZE) 1347 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 1348#endif 1349 1350 s = splbio(); 1351loop: 1352 if (numfreebuffers < lofreebuffers) { 1353 waitfreebuffers(slpflag, slptimeo); 1354 } 1355 1356 if ((bp = gbincore(vp, blkno))) { 1357loop1: 1358 generation = bp->b_generation; 1359 if (bp->b_flags & B_BUSY) { 1360 bp->b_flags |= B_WANTED; 1361 if (bp->b_usecount < BUF_MAXUSE) 1362 ++bp->b_usecount; 1363 if (!tsleep(bp, 1364 (PRIBIO + 1) | slpflag, "getblk", slptimeo)) { 1365 if (bp->b_generation != generation) 1366 goto loop; 1367 goto loop1; 1368 } else { 1369 splx(s); 1370 return (struct buf *) NULL; 1371 } 1372 } 1373 bp->b_flags |= B_BUSY | B_CACHE; 1374 bremfree(bp); 1375 1376 /* 1377 * check for size inconsistancies (note that they shouldn't 1378 * happen but do when filesystems don't handle the size changes 1379 * correctly.) We are conservative on metadata and don't just 1380 * extend the buffer but write and re-constitute it. 1381 */ 1382 1383 if (bp->b_bcount != size) { 1384 bp->b_generation++; 1385 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) { 1386 allocbuf(bp, size); 1387 } else { 1388 bp->b_flags |= B_NOCACHE; 1389 VOP_BWRITE(bp); 1390 goto loop; 1391 } 1392 } 1393 1394 if (bp->b_usecount < BUF_MAXUSE) 1395 ++bp->b_usecount; 1396 splx(s); 1397 return (bp); 1398 } else { 1399 vm_object_t obj; 1400 1401 if ((bp = getnewbuf(vp, blkno, 1402 slpflag, slptimeo, size, maxsize)) == 0) { 1403 if (slpflag || slptimeo) { 1404 splx(s); 1405 return NULL; 1406 } 1407 goto loop; 1408 } 1409 1410 /* 1411 * This code is used to make sure that a buffer is not 1412 * created while the getnewbuf routine is blocked. 1413 * Normally the vnode is locked so this isn't a problem. 1414 * VBLK type I/O requests, however, don't lock the vnode. 1415 */ 1416 if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) { 1417 bp->b_flags |= B_INVAL; 1418 brelse(bp); 1419 goto loop; 1420 } 1421 1422 /* 1423 * Insert the buffer into the hash, so that it can 1424 * be found by incore. 1425 */ 1426 bp->b_blkno = bp->b_lblkno = blkno; 1427 bgetvp(vp, bp); 1428 LIST_REMOVE(bp, b_hash); 1429 bh = BUFHASH(vp, blkno); 1430 LIST_INSERT_HEAD(bh, bp, b_hash); 1431 1432 if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) { 1433 bp->b_flags |= (B_VMIO | B_CACHE); 1434#if defined(VFS_BIO_DEBUG) 1435 if (vp->v_type != VREG && vp->v_type != VBLK) 1436 printf("getblk: vmioing file type %d???\n", vp->v_type); 1437#endif 1438 } else { 1439 bp->b_flags &= ~B_VMIO; 1440 } 1441 1442 allocbuf(bp, size); 1443 1444 splx(s); 1445#ifdef PC98 1446 /* 1447 * 1024byte/sector support 1448 */ 1449#define B_XXX2 0x8000000 1450 if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2; 1451#endif 1452 return (bp); 1453 } 1454} 1455 1456/* 1457 * Get an empty, disassociated buffer of given size. 1458 */ 1459struct buf * 1460geteblk(int size) 1461{ 1462 struct buf *bp; 1463 int s; 1464 1465 s = splbio(); 1466 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0); 1467 splx(s); 1468 allocbuf(bp, size); 1469 bp->b_flags |= B_INVAL; 1470 return (bp); 1471} 1472 1473 1474/* 1475 * This code constitutes the buffer memory from either anonymous system 1476 * memory (in the case of non-VMIO operations) or from an associated 1477 * VM object (in the case of VMIO operations). 
1478 * 1479 * Note that this code is tricky, and has many complications to resolve 1480 * deadlock or inconsistant data situations. Tread lightly!!! 1481 * 1482 * Modify the length of a buffer's underlying buffer storage without 1483 * destroying information (unless, of course the buffer is shrinking). 1484 */ 1485int 1486allocbuf(struct buf * bp, int size) 1487{ 1488 1489 int s; 1490 int newbsize, mbsize; 1491 int i; 1492 1493#if !defined(MAX_PERF) 1494 if (!(bp->b_flags & B_BUSY)) 1495 panic("allocbuf: buffer not busy"); 1496 1497 if (bp->b_kvasize < size) 1498 panic("allocbuf: buffer too small"); 1499#endif 1500 1501 if ((bp->b_flags & B_VMIO) == 0) { 1502 caddr_t origbuf; 1503 int origbufsize; 1504 /* 1505 * Just get anonymous memory from the kernel 1506 */ 1507 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1508#if !defined(NO_B_MALLOC) 1509 if (bp->b_flags & B_MALLOC) 1510 newbsize = mbsize; 1511 else 1512#endif 1513 newbsize = round_page(size); 1514 1515 if (newbsize < bp->b_bufsize) { 1516#if !defined(NO_B_MALLOC) 1517 /* 1518 * malloced buffers are not shrunk 1519 */ 1520 if (bp->b_flags & B_MALLOC) { 1521 if (newbsize) { 1522 bp->b_bcount = size; 1523 } else { 1524 free(bp->b_data, M_BIOBUF); 1525 bufspace -= bp->b_bufsize; 1526 bufmallocspace -= bp->b_bufsize; 1527 bp->b_data = bp->b_kvabase; 1528 bp->b_bufsize = 0; 1529 bp->b_bcount = 0; 1530 bp->b_flags &= ~B_MALLOC; 1531 } 1532 return 1; 1533 } 1534#endif 1535 vm_hold_free_pages( 1536 bp, 1537 (vm_offset_t) bp->b_data + newbsize, 1538 (vm_offset_t) bp->b_data + bp->b_bufsize); 1539 } else if (newbsize > bp->b_bufsize) { 1540#if !defined(NO_B_MALLOC) 1541 /* 1542 * We only use malloced memory on the first allocation. 1543 * and revert to page-allocated memory when the buffer grows. 1544 */ 1545 if ( (bufmallocspace < maxbufmallocspace) && 1546 (bp->b_bufsize == 0) && 1547 (mbsize <= PAGE_SIZE/2)) { 1548 1549 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 1550 bp->b_bufsize = mbsize; 1551 bp->b_bcount = size; 1552 bp->b_flags |= B_MALLOC; 1553 bufspace += mbsize; 1554 bufmallocspace += mbsize; 1555 return 1; 1556 } 1557#endif 1558 origbuf = NULL; 1559 origbufsize = 0; 1560#if !defined(NO_B_MALLOC) 1561 /* 1562 * If the buffer is growing on it's other-than-first allocation, 1563 * then we revert to the page-allocation scheme. 
1564 */ 1565 if (bp->b_flags & B_MALLOC) { 1566 origbuf = bp->b_data; 1567 origbufsize = bp->b_bufsize; 1568 bp->b_data = bp->b_kvabase; 1569 bufspace -= bp->b_bufsize; 1570 bufmallocspace -= bp->b_bufsize; 1571 bp->b_bufsize = 0; 1572 bp->b_flags &= ~B_MALLOC; 1573 newbsize = round_page(newbsize); 1574 } 1575#endif 1576 vm_hold_load_pages( 1577 bp, 1578 (vm_offset_t) bp->b_data + bp->b_bufsize, 1579 (vm_offset_t) bp->b_data + newbsize); 1580#if !defined(NO_B_MALLOC) 1581 if (origbuf) { 1582 bcopy(origbuf, bp->b_data, origbufsize); 1583 free(origbuf, M_BIOBUF); 1584 } 1585#endif 1586 } 1587 } else { 1588 vm_page_t m; 1589 int desiredpages; 1590 1591 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1592 desiredpages = (round_page(newbsize) >> PAGE_SHIFT); 1593 1594#if !defined(NO_B_MALLOC) 1595 if (bp->b_flags & B_MALLOC) 1596 panic("allocbuf: VMIO buffer can't be malloced"); 1597#endif 1598 1599 if (newbsize < bp->b_bufsize) { 1600 if (desiredpages < bp->b_npages) { 1601 for (i = desiredpages; i < bp->b_npages; i++) { 1602 /* 1603 * the page is not freed here -- it 1604 * is the responsibility of vnode_pager_setsize 1605 */ 1606 m = bp->b_pages[i]; 1607#if defined(DIAGNOSTIC) 1608 if (m == bogus_page) 1609 panic("allocbuf: bogus page found"); 1610#endif 1611 vm_page_sleep(m, "biodep", &m->busy); 1612 1613 bp->b_pages[i] = NULL; 1614 vm_page_unwire(m); 1615 } 1616 pmap_qremove((vm_offset_t) trunc_page(bp->b_data) + 1617 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 1618 bp->b_npages = desiredpages; 1619 } 1620 } else if (newbsize > bp->b_bufsize) { 1621 vm_object_t obj; 1622 vm_offset_t tinc, toff; 1623 vm_ooffset_t off; 1624 vm_pindex_t objoff; 1625 int pageindex, curbpnpages; 1626 struct vnode *vp; 1627 int bsize; 1628 int orig_validoff = bp->b_validoff; 1629 int orig_validend = bp->b_validend; 1630 1631 vp = bp->b_vp; 1632 1633 if (vp->v_type == VBLK) 1634 bsize = DEV_BSIZE; 1635 else 1636 bsize = vp->v_mount->mnt_stat.f_iosize; 1637 1638 if (bp->b_npages < desiredpages) { 1639 obj = vp->v_object; 1640 tinc = PAGE_SIZE; 1641 if (tinc > bsize) 1642 tinc = bsize; 1643 off = (vm_ooffset_t) bp->b_lblkno * bsize; 1644 curbpnpages = bp->b_npages; 1645 doretry: 1646 bp->b_validoff = orig_validoff; 1647 bp->b_validend = orig_validend; 1648 bp->b_flags |= B_CACHE; 1649 for (toff = 0; toff < newbsize; toff += tinc) { 1650 int bytesinpage; 1651 1652 pageindex = toff >> PAGE_SHIFT; 1653 objoff = OFF_TO_IDX(off + toff); 1654 if (pageindex < curbpnpages) { 1655 1656 m = bp->b_pages[pageindex]; 1657#ifdef VFS_BIO_DIAG 1658 if (m->pindex != objoff) 1659 panic("allocbuf: page changed offset??!!!?"); 1660#endif 1661 bytesinpage = tinc; 1662 if (tinc > (newbsize - toff)) 1663 bytesinpage = newbsize - toff; 1664 if (bp->b_flags & B_CACHE) 1665 vfs_buf_set_valid(bp, off, toff, bytesinpage, m); 1666 continue; 1667 } 1668 m = vm_page_lookup(obj, objoff); 1669 if (!m) { 1670 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 1671 if (!m) { 1672 VM_WAIT; 1673 vm_pageout_deficit += (desiredpages - bp->b_npages); 1674 goto doretry; 1675 } 1676 1677 vm_page_wire(m); 1678 m->flags &= ~PG_BUSY; 1679 bp->b_flags &= ~B_CACHE; 1680 1681 } else if (m->flags & PG_BUSY) { 1682 s = splvm(); 1683 if (m->flags & PG_BUSY) { 1684 m->flags |= PG_WANTED; 1685 tsleep(m, PVM, "pgtblk", 0); 1686 } 1687 splx(s); 1688 goto doretry; 1689 } else { 1690 if ((curproc != pageproc) && 1691 ((m->queue - m->pc) == PQ_CACHE) && 1692 ((cnt.v_free_count + cnt.v_cache_count) < 1693 (cnt.v_free_min + cnt.v_cache_min))) { 1694 
pagedaemon_wakeup(); 1695 } 1696 bytesinpage = tinc; 1697 if (tinc > (newbsize - toff)) 1698 bytesinpage = newbsize - toff; 1699 if (bp->b_flags & B_CACHE) 1700 vfs_buf_set_valid(bp, off, toff, bytesinpage, m); 1701 vm_page_wire(m); 1702 } 1703 bp->b_pages[pageindex] = m; 1704 curbpnpages = pageindex + 1; 1705 } 1706 if (vp->v_tag == VT_NFS && 1707 vp->v_type != VBLK) { 1708 if (bp->b_dirtyend > 0) { 1709 bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff); 1710 bp->b_validend = max(bp->b_validend, bp->b_dirtyend); 1711 } 1712 if (bp->b_validend == 0) 1713 bp->b_flags &= ~B_CACHE; 1714 } 1715 bp->b_data = (caddr_t) trunc_page(bp->b_data); 1716 bp->b_npages = curbpnpages; 1717 pmap_qenter((vm_offset_t) bp->b_data, 1718 bp->b_pages, bp->b_npages); 1719 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK; 1720 } 1721 } 1722 } 1723 if (bp->b_flags & B_VMIO) 1724 vmiospace += (newbsize - bp->b_bufsize); 1725 bufspace += (newbsize - bp->b_bufsize); 1726 bp->b_bufsize = newbsize; 1727 bp->b_bcount = size; 1728 return 1; 1729} 1730 1731/* 1732 * Wait for buffer I/O completion, returning error status. 1733 */ 1734int 1735biowait(register struct buf * bp) 1736{ 1737 int s; 1738 1739 s = splbio(); 1740 while ((bp->b_flags & B_DONE) == 0) 1741#if defined(NO_SCHEDULE_MODS) 1742 tsleep(bp, PRIBIO, "biowait", 0); 1743#else 1744 if (bp->b_flags & B_READ) 1745 tsleep(bp, PRIBIO, "biord", 0); 1746 else 1747 tsleep(bp, curproc->p_usrpri, "biowr", 0); 1748#endif 1749 splx(s); 1750 if (bp->b_flags & B_EINTR) { 1751 bp->b_flags &= ~B_EINTR; 1752 return (EINTR); 1753 } 1754 if (bp->b_flags & B_ERROR) { 1755 return (bp->b_error ? bp->b_error : EIO); 1756 } else { 1757 return (0); 1758 } 1759} 1760 1761/* 1762 * Finish I/O on a buffer, calling an optional function. 1763 * This is usually called from interrupt level, so process blocking 1764 * is not *a good idea*. 
1765 */ 1766void 1767biodone(register struct buf * bp) 1768{ 1769 int s; 1770 1771 s = splbio(); 1772 1773#if !defined(MAX_PERF) 1774 if (!(bp->b_flags & B_BUSY)) 1775 panic("biodone: buffer not busy"); 1776#endif 1777 1778 if (bp->b_flags & B_DONE) { 1779 splx(s); 1780#if !defined(MAX_PERF) 1781 printf("biodone: buffer already done\n"); 1782#endif 1783 return; 1784 } 1785 bp->b_flags |= B_DONE; 1786 1787 if ((bp->b_flags & B_READ) == 0) { 1788 vwakeup(bp); 1789 } 1790#ifdef BOUNCE_BUFFERS 1791 if (bp->b_flags & B_BOUNCE) 1792 vm_bounce_free(bp); 1793#endif 1794 1795 /* call optional completion function if requested */ 1796 if (bp->b_flags & B_CALL) { 1797 bp->b_flags &= ~B_CALL; 1798 (*bp->b_iodone) (bp); 1799 splx(s); 1800 return; 1801 } 1802 if (bp->b_flags & B_VMIO) { 1803 int i, resid; 1804 vm_ooffset_t foff; 1805 vm_page_t m; 1806 vm_object_t obj; 1807 int iosize; 1808 struct vnode *vp = bp->b_vp; 1809 1810 obj = vp->v_object; 1811 1812#if defined(VFS_BIO_DEBUG) 1813 if (vp->v_usecount == 0) { 1814 panic("biodone: zero vnode ref count"); 1815 } 1816 1817 if (vp->v_object == NULL) { 1818 panic("biodone: missing VM object"); 1819 } 1820 1821 if ((vp->v_flag & VOBJBUF) == 0) { 1822 panic("biodone: vnode is not setup for merged cache"); 1823 } 1824#endif 1825 1826 if (vp->v_type == VBLK) 1827 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1828 else 1829 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1830#if !defined(MAX_PERF) 1831 if (!obj) { 1832 panic("biodone: no object"); 1833 } 1834#endif 1835#if defined(VFS_BIO_DEBUG) 1836 if (obj->paging_in_progress < bp->b_npages) { 1837 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1838 obj->paging_in_progress, bp->b_npages); 1839 } 1840#endif 1841 iosize = bp->b_bufsize; 1842 for (i = 0; i < bp->b_npages; i++) { 1843 int bogusflag = 0; 1844 m = bp->b_pages[i]; 1845 if (m == bogus_page) { 1846 bogusflag = 1; 1847 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 1848 if (!m) { 1849#if defined(VFS_BIO_DEBUG) 1850 printf("biodone: page disappeared\n"); 1851#endif 1852 --obj->paging_in_progress; 1853 continue; 1854 } 1855 bp->b_pages[i] = m; 1856 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1857 } 1858#if defined(VFS_BIO_DEBUG) 1859 if (OFF_TO_IDX(foff) != m->pindex) { 1860 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex); 1861 } 1862#endif 1863 resid = IDX_TO_OFF(m->pindex + 1) - foff; 1864 if (resid > iosize) 1865 resid = iosize; 1866 /* 1867 * In the write case, the valid and clean bits are 1868 * already changed correctly, so we only need to do this 1869 * here in the read case. 1870 */ 1871 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 1872 vfs_page_set_valid(bp, foff, i, m); 1873 } 1874 1875 /* 1876 * when debugging new filesystems or buffer I/O methods, this 1877 * is the most common error that pops up. if you see this, you 1878 * have not set the page busy flag correctly!!! 
1879 */ 1880 if (m->busy == 0) { 1881#if !defined(MAX_PERF) 1882 printf("biodone: page busy < 0, " 1883 "pindex: %d, foff: 0x(%x,%x), " 1884 "resid: %d, index: %d\n", 1885 (int) m->pindex, (int)(foff >> 32), 1886 (int) foff & 0xffffffff, resid, i); 1887#endif 1888 if (vp->v_type != VBLK) 1889#if !defined(MAX_PERF) 1890 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 1891 bp->b_vp->v_mount->mnt_stat.f_iosize, 1892 (int) bp->b_lblkno, 1893 bp->b_flags, bp->b_npages); 1894 else 1895 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 1896 (int) bp->b_lblkno, 1897 bp->b_flags, bp->b_npages); 1898 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 1899 m->valid, m->dirty, m->wire_count); 1900#endif 1901 panic("biodone: page busy < 0\n"); 1902 } 1903 PAGE_BWAKEUP(m); 1904 --obj->paging_in_progress; 1905 foff += resid; 1906 iosize -= resid; 1907 } 1908 if (obj && 1909 (obj->paging_in_progress == 0) && 1910 (obj->flags & OBJ_PIPWNT)) { 1911 obj->flags &= ~OBJ_PIPWNT; 1912 wakeup(obj); 1913 } 1914 } 1915 /* 1916 * For asynchronous completions, release the buffer now. The brelse 1917 * checks for B_WANTED and will do the wakeup there if necessary - so 1918 * no need to do a wakeup here in the async case. 1919 */ 1920 1921 if (bp->b_flags & B_ASYNC) { 1922 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 1923 brelse(bp); 1924 else 1925 bqrelse(bp); 1926 } else { 1927 bp->b_flags &= ~B_WANTED; 1928 wakeup(bp); 1929 } 1930 splx(s); 1931} 1932 1933static int 1934count_lock_queue() 1935{ 1936 int count; 1937 struct buf *bp; 1938 1939 count = 0; 1940 for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]); 1941 bp != NULL; 1942 bp = TAILQ_NEXT(bp, b_freelist)) 1943 count++; 1944 return (count); 1945} 1946 1947static int vfs_update_interval = 30; 1948 1949static void 1950vfs_update() 1951{ 1952 while (1) { 1953 tsleep(&vfs_update_wakeup, PUSER, "update", 1954 hz * vfs_update_interval); 1955 vfs_update_wakeup = 0; 1956 sync(curproc, NULL); 1957 } 1958} 1959 1960static int 1961sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS 1962{ 1963 int error = sysctl_handle_int(oidp, 1964 oidp->oid_arg1, oidp->oid_arg2, req); 1965 if (!error) 1966 wakeup(&vfs_update_wakeup); 1967 return error; 1968} 1969 1970SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW, 1971 &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", ""); 1972 1973 1974/* 1975 * This routine is called in lieu of iodone in the case of 1976 * incomplete I/O. This keeps the busy status for pages 1977 * consistant. 1978 */ 1979void 1980vfs_unbusy_pages(struct buf * bp) 1981{ 1982 int i; 1983 1984 if (bp->b_flags & B_VMIO) { 1985 struct vnode *vp = bp->b_vp; 1986 vm_object_t obj = vp->v_object; 1987 vm_ooffset_t foff; 1988 1989 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1990 1991 for (i = 0; i < bp->b_npages; i++) { 1992 vm_page_t m = bp->b_pages[i]; 1993 1994 if (m == bogus_page) { 1995 m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i); 1996#if !defined(MAX_PERF) 1997 if (!m) { 1998 panic("vfs_unbusy_pages: page missing\n"); 1999 } 2000#endif 2001 bp->b_pages[i] = m; 2002 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 2003 } 2004 --obj->paging_in_progress; 2005 PAGE_BWAKEUP(m); 2006 } 2007 if (obj->paging_in_progress == 0 && 2008 (obj->flags & OBJ_PIPWNT)) { 2009 obj->flags &= ~OBJ_PIPWNT; 2010 wakeup(obj); 2011 } 2012 } 2013} 2014 2015/* 2016 * Set NFS' b_validoff and b_validend fields from the valid bits 2017 * of a page. 
If the consumer is not NFS, and the page is not 2018 * valid for the entire range, clear the B_CACHE flag to force 2019 * the consumer to re-read the page. 2020 */ 2021static void 2022vfs_buf_set_valid(struct buf *bp, 2023 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 2024 vm_page_t m) 2025{ 2026 if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) { 2027 vm_offset_t svalid, evalid; 2028 int validbits = m->valid; 2029 2030 /* 2031 * This only bothers with the first valid range in the 2032 * page. 2033 */ 2034 svalid = off; 2035 while (validbits && !(validbits & 1)) { 2036 svalid += DEV_BSIZE; 2037 validbits >>= 1; 2038 } 2039 evalid = svalid; 2040 while (validbits & 1) { 2041 evalid += DEV_BSIZE; 2042 validbits >>= 1; 2043 } 2044 /* 2045 * Make sure this range is contiguous with the range 2046 * built up from previous pages. If not, then we will 2047 * just use the range from the previous pages. 2048 */ 2049 if (svalid == bp->b_validend) { 2050 bp->b_validoff = min(bp->b_validoff, svalid); 2051 bp->b_validend = max(bp->b_validend, evalid); 2052 } 2053 } else if (!vm_page_is_valid(m, 2054 (vm_offset_t) ((foff + off) & PAGE_MASK), 2055 size)) { 2056 bp->b_flags &= ~B_CACHE; 2057 } 2058} 2059 2060/* 2061 * Set the valid bits in a page, taking care of the b_validoff, 2062 * b_validend fields which NFS uses to optimise small reads. Off is 2063 * the offset within the file and pageno is the page index within the buf. 2064 */ 2065static void 2066vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2067{ 2068 struct vnode *vp = bp->b_vp; 2069 vm_ooffset_t soff, eoff; 2070 2071 soff = off; 2072 eoff = off + min(PAGE_SIZE, bp->b_bufsize); 2073 vm_page_set_invalid(m, 2074 (vm_offset_t) (soff & PAGE_MASK), 2075 (vm_offset_t) (eoff - soff)); 2076 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) { 2077 vm_ooffset_t sv, ev; 2078 off = off - pageno * PAGE_SIZE; 2079 sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2080 ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2081 soff = max(sv, soff); 2082 eoff = min(ev, eoff); 2083 } 2084 if (eoff > soff) 2085 vm_page_set_validclean(m, 2086 (vm_offset_t) (soff & PAGE_MASK), 2087 (vm_offset_t) (eoff - soff)); 2088} 2089 2090/* 2091 * This routine is called before a device strategy routine. 2092 * It is used to tell the VM system that paging I/O is in 2093 * progress, and treat the pages associated with the buffer 2094 * almost as being PG_BUSY. Also the object paging_in_progress 2095 * flag is handled to make sure that the object doesn't become 2096 * inconsistant. 
2097 */ 2098void 2099vfs_busy_pages(struct buf * bp, int clear_modify) 2100{ 2101 int i,s; 2102 2103 if (bp->b_flags & B_VMIO) { 2104 struct vnode *vp = bp->b_vp; 2105 vm_object_t obj = vp->v_object; 2106 vm_ooffset_t foff; 2107 2108 if (vp->v_type == VBLK) 2109 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 2110 else 2111 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 2112 2113 vfs_setdirty(bp); 2114 2115retry: 2116 for (i = 0; i < bp->b_npages; i++) { 2117 vm_page_t m = bp->b_pages[i]; 2118 if (vm_page_sleep(m, "vbpage", NULL)) 2119 goto retry; 2120 } 2121 2122 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) { 2123 vm_page_t m = bp->b_pages[i]; 2124 2125 if ((bp->b_flags & B_CLUSTER) == 0) { 2126 obj->paging_in_progress++; 2127 m->busy++; 2128 } 2129 2130 vm_page_protect(m, VM_PROT_NONE); 2131 if (clear_modify) 2132 vfs_page_set_valid(bp, foff, i, m); 2133 else if (bp->b_bcount >= PAGE_SIZE) { 2134 if (m->valid && (bp->b_flags & B_CACHE) == 0) { 2135 bp->b_pages[i] = bogus_page; 2136 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 2137 } 2138 } 2139 } 2140 } 2141} 2142 2143/* 2144 * Tell the VM system that the pages associated with this buffer 2145 * are clean. This is used for delayed writes where the data is 2146 * going to go to disk eventually without additional VM intevention. 2147 */ 2148void 2149vfs_clean_pages(struct buf * bp) 2150{ 2151 int i; 2152 2153 if (bp->b_flags & B_VMIO) { 2154 struct vnode *vp = bp->b_vp; 2155 vm_ooffset_t foff; 2156 2157 if (vp->v_type == VBLK) 2158 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 2159 else 2160 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 2161 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) { 2162 vm_page_t m = bp->b_pages[i]; 2163 2164 vfs_page_set_valid(bp, foff, i, m); 2165 } 2166 } 2167} 2168 2169void 2170vfs_bio_clrbuf(struct buf *bp) { 2171 int i; 2172 if( bp->b_flags & B_VMIO) { 2173 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) { 2174 int mask; 2175 mask = 0; 2176 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE) 2177 mask |= (1 << (i/DEV_BSIZE)); 2178 if( bp->b_pages[0]->valid != mask) { 2179 bzero(bp->b_data, bp->b_bufsize); 2180 } 2181 bp->b_pages[0]->valid = mask; 2182 bp->b_resid = 0; 2183 return; 2184 } 2185 for(i=0;i<bp->b_npages;i++) { 2186 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL) 2187 continue; 2188 if( bp->b_pages[i]->valid == 0) { 2189 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 2190 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE); 2191 } 2192 } else { 2193 int j; 2194 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) { 2195 if( (bp->b_pages[i]->valid & (1<<j)) == 0) 2196 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE); 2197 } 2198 } 2199 /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */ 2200 } 2201 bp->b_resid = 0; 2202 } else { 2203 clrbuf(bp); 2204 } 2205} 2206 2207/* 2208 * vm_hold_load_pages and vm_hold_unload pages get pages into 2209 * a buffers address space. The pages are anonymous and are 2210 * not associated with a file object. 
2211 */ 2212void 2213vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2214{ 2215 vm_offset_t pg; 2216 vm_page_t p; 2217 int index; 2218 2219 to = round_page(to); 2220 from = round_page(from); 2221 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 2222 2223 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2224 2225tryagain: 2226 2227 p = vm_page_alloc(kernel_object, 2228 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 2229 VM_ALLOC_NORMAL); 2230 if (!p) { 2231 vm_pageout_deficit += (to - from) >> PAGE_SHIFT; 2232 VM_WAIT; 2233 goto tryagain; 2234 } 2235 vm_page_wire(p); 2236 p->valid = VM_PAGE_BITS_ALL; 2237 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 2238 bp->b_pages[index] = p; 2239 PAGE_WAKEUP(p); 2240 } 2241 bp->b_npages = index; 2242} 2243 2244void 2245vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2246{ 2247 vm_offset_t pg; 2248 vm_page_t p; 2249 int index, newnpages; 2250 2251 from = round_page(from); 2252 to = round_page(to); 2253 newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 2254 2255 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2256 p = bp->b_pages[index]; 2257 if (p && (index < bp->b_npages)) { 2258#if !defined(MAX_PERF) 2259 if (p->busy) { 2260 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 2261 bp->b_blkno, bp->b_lblkno); 2262 } 2263#endif 2264 bp->b_pages[index] = NULL; 2265 pmap_kremove(pg); 2266 p->flags |= PG_BUSY; 2267 vm_page_unwire(p); 2268 vm_page_free(p); 2269 } 2270 } 2271 bp->b_npages = newnpages; 2272} 2273 2274 2275#include "opt_ddb.h" 2276#ifdef DDB 2277#include <ddb/ddb.h> 2278 2279DB_SHOW_COMMAND(buffer, db_show_buffer) 2280{ 2281 /* get args */ 2282 struct buf *bp = (struct buf *)addr; 2283 2284 if (!have_addr) { 2285 db_printf("usage: show buffer <addr>\n"); 2286 return; 2287 } 2288 2289 db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc, 2290 bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered" 2291 "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape" 2292 "\25read\24raw\23phys\22clusterok\21malloc\20nocache" 2293 "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty" 2294 "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age"); 2295 db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, " 2296 "b_resid = %ld\nb_dev = 0x%x, b_data = %p, " 2297 "b_blkno = %d, b_pblkno = %d\n", 2298 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 2299 bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno); 2300 if (bp->b_npages) { 2301 int i; 2302 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 2303 for (i = 0; i < bp->b_npages; i++) { 2304 vm_page_t m; 2305 m = bp->b_pages[i]; 2306 db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex, 2307 VM_PAGE_TO_PHYS(m)); 2308 if ((i + 1) < bp->b_npages) 2309 db_printf(","); 2310 } 2311 db_printf("\n"); 2312 } 2313} 2314#endif /* DDB */ 2315