vfs_bio.c revision 26599
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.119 1997/06/06 09:04:28 dfr Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 16

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}
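/*
 * Illustrative sketch, not part of the original file: the free queues
 * above are protected only by splbio(), so a caller that wants to claim
 * a queued buffer marks it B_BUSY and pulls it off its queue while at
 * splbio, as getnewbuf() does below.  VFS_BIO_EXAMPLES is a hypothetical
 * guard macro that is never defined, so this is never compiled in.
 */
#ifdef VFS_BIO_EXAMPLES
static void
example_claim_buffer(struct buf *bp)
{
	int s;

	s = splbio();		/* block buffer interrupts */
	bp->b_flags |= B_BUSY;	/* assert ownership before dropping spl */
	bremfree(bp);		/* take it off its free queue */
	splx(s);
}
#endif /* VFS_BIO_EXAMPLES */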
/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}
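/*
 * Illustrative sketch, not part of the original file: the usual
 * filesystem read path calls bread() and releases the buffer once the
 * data has been copied out.  VFS_BIO_EXAMPLES is a hypothetical guard
 * macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize, caddr_t dst)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);	/* error: buffer contents are junk */
		return (error);
	}
	bcopy(bp->b_data, dst, bsize);
	bqrelse(bp);		/* contents still valid: keep it cached */
	return (0);
}
#endif /* VFS_BIO_EXAMPLES */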
/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise
	 * if biodone was not modified, then the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC) {
			if ((bp->b_flags & B_DONE)) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}
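/*
 * Illustrative sketch, not part of the original file: the classic
 * read-modify-delayed-write cycle used for metadata updates.  The write
 * is deferred to a later sync pass by bdwrite().  VFS_BIO_EXAMPLES is a
 * hypothetical guard macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_modify_block(struct vnode *vp, daddr_t lbn, int bsize, int off, char v)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	bp->b_data[off] = v;	/* modify the cached block in place */
	bdwrite(bp);		/* mark dirty; written on a later sync */
	return (0);
}
#endif /* VFS_BIO_EXAMPLES */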
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
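/*
 * Illustrative sketch, not part of the original file: gbincore() walks
 * the buffer hash with no locking of its own, so callers hold splbio()
 * across the lookup, as incore() does further below.  VFS_BIO_EXAMPLES
 * is a hypothetical guard macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_block_cached(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;
	int s;

	s = splbio();
	bp = gbincore(vp, blkno);	/* hash lookup; B_INVAL filtered out */
	splx(s);
	return (bp != NULL);
}
#endif /* VFS_BIO_EXAMPLES */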
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
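/*
 * Illustrative sketch, not part of the original file: a flushing loop
 * such as the one in getnewbuf() below hands a delayed-write buffer
 * taken from a free queue to vfs_bio_awrite() and accumulates the byte
 * count it returns.  VFS_BIO_EXAMPLES is a hypothetical guard macro,
 * never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_flush_delwri(struct buf *bp)
{
	int nwritten = 0;

	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
		nwritten = vfs_bio_awrite(bp);	/* may cluster neighbors */
	return (nwritten);
}
#endif /* VFS_BIO_EXAMPLES */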
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
		    vm_map_min(buffer_map), maxsize, &addr)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we are below our allocated minimum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
		    addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}
/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}
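/*
 * Illustrative sketch, not part of the original file: breadn() above
 * uses inmem() exactly this way to skip read-ahead blocks whose data is
 * already resident in the VM object, even when no buffer header exists
 * for them.  VFS_BIO_EXAMPLES is a hypothetical guard macro, never
 * defined.
 */
#ifdef VFS_BIO_EXAMPLES
static void
example_start_readahead(struct vnode *vp, daddr_t rablkno, int rabsize)
{
	struct buf *rabp;

	if (inmem(vp, rablkno))		/* data resident: no I/O needed */
		return;
	rabp = getblk(vp, rablkno, rabsize, 0, 0);
	if (rabp->b_flags & B_CACHE) {	/* buffer already valid */
		brelse(rabp);
		return;
	}
	rabp->b_flags |= B_READ | B_ASYNC;
	rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
	vfs_busy_pages(rabp, 0);
	VOP_STRATEGY(rabp);		/* async; biodone() releases it */
}
#endif /* VFS_BIO_EXAMPLES */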
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000)
			bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
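/*
 * Illustrative sketch, not part of the original file: writing a block
 * without reading it first -- getblk() either finds the cached buffer
 * or constitutes a new one, and the caller fills it and writes it out.
 * VFS_BIO_EXAMPLES is a hypothetical guard macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_overwrite_block(struct vnode *vp, daddr_t lbn, int bsize, caddr_t src)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);	/* returned B_BUSY */
	bcopy(src, bp->b_data, bsize);		/* overwrite whole block */
	return (bwrite(bp));			/* synchronous write */
}
#endif /* VFS_BIO_EXAMPLES */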
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation.
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
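/*
 * Illustrative sketch, not part of the original file: biodone() below
 * invokes bp->b_iodone when B_CALL is set, and it is then up to the
 * callback to dispose of the buffer.  The flag handling here mirrors
 * bwrite() above.  VFS_BIO_EXAMPLES is a hypothetical guard macro,
 * never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static void
example_done(struct buf *bp)
{
	/* called from biodone(); the callback must release bp itself */
	brelse(bp);
}

static void
example_write_with_callback(struct buf *bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_CALL | B_ASYNC;
	bp->b_iodone = example_done;
	bp->b_vp->v_numoutput++;	/* vwakeup() undoes this in biodone */
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp);		/* completion arrives via biodone() */
}
#endif /* VFS_BIO_EXAMPLES */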
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
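/*
 * Illustrative sketch, not part of the original file: vfs_update()
 * sleeps on vfs_update_wakeup, so (per the comment at the top of this
 * file) other kernel code can request an early sync pass by setting the
 * flag and issuing a wakeup, much as the sysctl handler above does.
 * VFS_BIO_EXAMPLES is a hypothetical guard macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static void
example_request_sync(void)
{
	vfs_update_wakeup = 1;
	wakeup(&vfs_update_wakeup);	/* rouse the update daemon now */
}
#endif /* VFS_BIO_EXAMPLES */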
/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
	    (vm_offset_t) ((foff + off) & PAGE_MASK),
	    size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
	    (vm_offset_t) (soff & PAGE_MASK),
	    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}
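/*
 * Illustrative sketch, not part of the original file: the standard
 * pairing around a device strategy call, assuming bp has been set up
 * for a read as in bread() above.  A driver that cannot complete the
 * transfer would call vfs_unbusy_pages() above in lieu of biodone() to
 * undo the paging accounting.  VFS_BIO_EXAMPLES is a hypothetical guard
 * macro, never defined.
 */
#ifdef VFS_BIO_EXAMPLES
static int
example_start_read(struct buf *bp)
{
	bp->b_flags |= B_READ;
	vfs_busy_pages(bp, 0);	/* pages now treated as paging I/O */
	VOP_STRATEGY(bp);
	return (biowait(bp));	/* biodone() unbusies the pages */
}
#endif /* VFS_BIO_EXAMPLES */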
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE/DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
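/*
 * Illustrative sketch, not part of the original file: when a filesystem
 * allocates a fresh block it must not expose stale data, so it clears
 * the buffer; vfs_bio_clrbuf() above zeroes only the portions that are
 * not already valid, which is cheaper than a full bzero for VMIO
 * buffers.  VFS_BIO_EXAMPLES is a hypothetical guard macro, never
 * defined.
 */
#ifdef VFS_BIO_EXAMPLES
static struct buf *
example_new_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	vfs_bio_clrbuf(bp);	/* zero only the not-yet-valid portions */
	return (bp);		/* caller fills it in and bdwrite()s it */
}
#endif /* VFS_BIO_EXAMPLES */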
/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
}
#endif /* DDB */
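/*
 * Illustrative note, not part of the original file: with DDB compiled
 * in, the command defined above is invoked from the debugger prompt as
 *
 *	db> show buffer <address-of-struct-buf>
 *
 * and dumps that buffer header's fields and decoded flag bits.
 */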