vfs_bio.c revision 20054
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.106 1996/11/28 04:26:04 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/lock.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 16

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 * as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}
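
/*
 * Illustrative sketch (not part of the original source): a typical
 * consumer of the read interface above obtains a busy buffer from
 * bread(), uses b_data while holding it, and releases it with brelse()
 * or bqrelse().  The function name example_read() and its arguments
 * are hypothetical and exist only for illustration.
 */
#if 0
static int
example_read(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred)
{
	struct buf *bp;
	int error;

	/* bread() looks in the cache first and only does I/O on a miss. */
	error = bread(vp, blkno, size, cred, &bp);
	if (error) {
		brelse(bp);		/* the buffer is returned even on error */
		return (error);
	}
	/* ... examine bp->b_data while the buffer is B_BUSY ... */
	bqrelse(bp);			/* quick release; contents stay cached */
	return (0);
}
#endif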

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise
	 * if biodone was not modified, then the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC) {
			if ((bp->b_flags & B_DONE)) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

/*
 * Implement the VOP_BWRITE interface: hand the buffer to bwrite().
 */
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data structure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if( bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();


	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release the VM pages backing a VMIO buffer, handing them back to the
 * VM system (free, cache, or deactivate them as appropriate).
 */
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue, so our policy for
			 * such buffers is to avoid the cache queue, and
			 * only modify the active queue or free queue.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

			/*
			 * In the case of sync buffer frees, we can do pretty much
			 * anything to any of the memory queues.  Specifically,
			 * the cache queue is free to be modified.
			 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if ((vm_swap_size == 0) ||
						(cnt.v_free_count < cnt.v_free_min)) {
						if ((m->dirty == 0) &&
							(m->hold_count == 0))
							vm_page_cache(m);
						else
							vm_page_deactivate(m);
					}
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
			(vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
	bfreekva(bp);

	/*
	 * See if we have buffer kva space
	 */
	if (vm_map_findspace(buffer_map, 0, maxsize, &addr)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * See if we are below our allocated minimum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	vm_map_insert(buffer_map, NULL, 0,
		addr, addr + maxsize,
		VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

	bp->b_data = (caddr_t) addr;
	bp->b_kvabase = (caddr_t) addr;
	bp->b_kvasize = maxsize;

	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't happen
		 * but do when filesystems don't handle the size changes correctly.)
		 * We are conservative on metadata and don't just extend the buffer
		 * but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
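
/*
 * Illustrative sketch (not part of the original source): geteblk() above
 * returns an anonymous buffer that has no vnode association, which is
 * handy for temporary kernel storage.  Because B_INVAL is set, brelse()
 * later sends both the memory and the header straight back to the free
 * queues.  The name example_scratch() is hypothetical.
 */
#if 0
static void
example_scratch(int size)
{
	struct buf *bp;

	bp = geteblk(size);		/* anonymous, B_BUSY | B_INVAL buffer */
	bzero(bp->b_data, size);	/* use b_data as scratch space */
	/* ... */
	brelse(bp);			/* frees the pages and the header */
}
#endif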

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							((m->queue - m->pc) == PQ_CACHE) &&
							((cnt.v_free_count + cnt.v_cache_count) <
								(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
					"pindex: %d, foff: 0x(%x,%x), "
					"resid: %d, index: %d\n",
					(int) m->pindex, (int)(foff >> 32),
					(int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
					m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

/*
 * Count the buffers currently sitting on the locked queue.
 */
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

/*
 * The update daemon: periodically sync all mounted filesystems.
 */
static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * sysctl handler for kern.update: record the new interval and poke the
 * update daemon so that it takes effect immediately.
 */
static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
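
/*
 * Illustrative sketch (not part of the original source): the page
 * busy/unbusy helpers below bracket device I/O on VMIO buffers.  A
 * caller that drives its own I/O, rather than going through bread()
 * or bwrite(), pairs them roughly as follows; example_io() is a
 * hypothetical name used only for illustration.
 */
#if 0
static int
example_io(struct buf *bp)
{
	bp->b_flags |= B_READ;
	vfs_busy_pages(bp, 0);		/* mark the pages busy before strategy */
	VOP_STRATEGY(bp);		/* start the transfer */
	return (biowait(bp));		/* biodone() unbusies the pages */
}
#endif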

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
					((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Zero out the portions of a buffer that are not already marked valid
 * in the backing VM pages, so the caller sees fully cleared storage.
 */
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}
1809 */ 1810void 1811vfs_clean_pages(struct buf * bp) 1812{ 1813 int i; 1814 1815 if (bp->b_flags & B_VMIO) { 1816 vm_ooffset_t foff; 1817 int iocount = bp->b_bufsize; 1818 1819 if (bp->b_vp->v_type == VBLK) 1820 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1821 else 1822 foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1823 1824 for (i = 0; i < bp->b_npages; i++) { 1825 vm_page_t m = bp->b_pages[i]; 1826 int resid = IDX_TO_OFF(m->pindex + 1) - foff; 1827 1828 if (resid > iocount) 1829 resid = iocount; 1830 if (resid > 0) { 1831 vm_page_set_validclean(m, 1832 ((vm_offset_t) foff & PAGE_MASK), resid); 1833 } 1834 foff += resid; 1835 iocount -= resid; 1836 } 1837 } 1838} 1839 1840void 1841vfs_bio_clrbuf(struct buf *bp) { 1842 int i; 1843 if( bp->b_flags & B_VMIO) { 1844 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) { 1845 int mask; 1846 mask = 0; 1847 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE) 1848 mask |= (1 << (i/DEV_BSIZE)); 1849 if( bp->b_pages[0]->valid != mask) { 1850 bzero(bp->b_data, bp->b_bufsize); 1851 } 1852 bp->b_pages[0]->valid = mask; 1853 bp->b_resid = 0; 1854 return; 1855 } 1856 for(i=0;i<bp->b_npages;i++) { 1857 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL) 1858 continue; 1859 if( bp->b_pages[i]->valid == 0) { 1860 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 1861 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE); 1862 } 1863 } else { 1864 int j; 1865 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) { 1866 if( (bp->b_pages[i]->valid & (1<<j)) == 0) 1867 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE); 1868 } 1869 } 1870 /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */ 1871 } 1872 bp->b_resid = 0; 1873 } else { 1874 clrbuf(bp); 1875 } 1876} 1877 1878/* 1879 * vm_hold_load_pages and vm_hold_unload pages get pages into 1880 * a buffers address space. The pages are anonymous and are 1881 * not associated with a file object. 1882 */ 1883void 1884vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1885{ 1886 vm_offset_t pg; 1887 vm_page_t p; 1888 int index; 1889 1890 to = round_page(to); 1891 from = round_page(from); 1892 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1893 1894 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1895 1896tryagain: 1897 1898 p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 1899 VM_ALLOC_NORMAL); 1900 if (!p) { 1901 VM_WAIT; 1902 goto tryagain; 1903 } 1904 vm_page_wire(p); 1905 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 1906 bp->b_pages[index] = p; 1907 PAGE_WAKEUP(p); 1908 } 1909 bp->b_npages = to >> PAGE_SHIFT; 1910} 1911 1912void 1913vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1914{ 1915 vm_offset_t pg; 1916 vm_page_t p; 1917 int index; 1918 1919 from = round_page(from); 1920 to = round_page(to); 1921 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1922 1923 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1924 p = bp->b_pages[index]; 1925 if (p && (index < bp->b_npages)) { 1926 if (p->busy) { 1927 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 1928 bp->b_blkno, bp->b_lblkno); 1929 } 1930 bp->b_pages[index] = NULL; 1931 pmap_kremove(pg); 1932 vm_page_unwire(p); 1933 vm_page_free(p); 1934 } 1935 } 1936 bp->b_npages = from >> PAGE_SHIFT; 1937} 1938