vfs_bio.c revision 18737
1/* 2 * Copyright (c) 1994 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Absolutely no warranty of function or purpose is made by the author 15 * John S. Dyson. 16 * 4. This work was done expressly for inclusion into FreeBSD. Other use 17 * is allowed if this notation is included. 18 * 5. Modifications may be freely made to this file if the above conditions 19 * are met. 20 * 21 * $Id: vfs_bio.c,v 1.102 1996/09/20 02:26:35 dyson Exp $ 22 */ 23 24/* 25 * this file contains a new buffer I/O scheme implementing a coherent 26 * VM object and buffer cache scheme. Pains have been taken to make 27 * sure that the performance degradation associated with schemes such 28 * as this is not realized. 29 * 30 * Author: John S. Dyson 31 * Significant help during the development and debugging phases 32 * had been provided by David Greenman, also of the FreeBSD core team. 33 */ 34 35#include "opt_bounce.h" 36 37#define VMIO 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/sysproto.h> 41#include <sys/kernel.h> 42#include <sys/sysctl.h> 43#include <sys/proc.h> 44#include <sys/vnode.h> 45#include <sys/vmmeter.h> 46#include <vm/vm.h> 47#include <vm/vm_param.h> 48#include <vm/vm_prot.h> 49#include <vm/vm_kern.h> 50#include <vm/vm_pageout.h> 51#include <vm/vm_page.h> 52#include <vm/vm_object.h> 53#include <vm/vm_extern.h> 54#include <sys/buf.h> 55#include <sys/mount.h> 56#include <sys/malloc.h> 57#include <sys/resourcevar.h> 58#include <sys/proc.h> 59 60#include <miscfs/specfs/specdev.h> 61 62static void vfs_update __P((void)); 63static struct proc *updateproc; 64static struct kproc_desc up_kp = { 65 "update", 66 vfs_update, 67 &updateproc 68}; 69SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 70 71struct buf *buf; /* buffer header pool */ 72struct swqueue bswlist; 73 74int count_lock_queue __P((void)); 75static void vm_hold_free_pages(struct buf * bp, vm_offset_t from, 76 vm_offset_t to); 77static void vm_hold_load_pages(struct buf * bp, vm_offset_t from, 78 vm_offset_t to); 79static void vfs_clean_pages(struct buf * bp); 80static void vfs_setdirty(struct buf *bp); 81static void vfs_vmio_release(struct buf *bp); 82 83int needsbuffer; 84 85/* 86 * Internal update daemon, process 3 87 * The variable vfs_update_wakeup allows for internal syncs. 88 */ 89int vfs_update_wakeup; 90 91 92/* 93 * buffers base kva 94 */ 95caddr_t buffers_kva; 96 97/* 98 * bogus page -- for I/O to/from partially complete buffers 99 * this is a temporary solution to the problem, but it is not 100 * really that bad. it would be better to split the buffer 101 * for input in the case of buffers partially already in memory, 102 * but the code is intricate enough already. 103 */ 104vm_page_t bogus_page; 105static vm_offset_t bogus_offset; 106 107static int bufspace, maxbufspace, vmiospace, maxvmiobufspace, 108 bufmallocspace, maxbufmallocspace; 109 110static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash; 111static struct bqueues bufqueues[BUFFER_QUEUES]; 112 113extern int vm_swap_size; 114 115#define BUF_MAXUSE 16 116 117/* 118 * Initialize buffer headers and related structures. 119 */ 120void 121bufinit() 122{ 123 struct buf *bp; 124 int i; 125 126 TAILQ_INIT(&bswlist); 127 LIST_INIT(&invalhash); 128 129 /* first, make a null hash table */ 130 for (i = 0; i < BUFHSZ; i++) 131 LIST_INIT(&bufhashtbl[i]); 132 133 /* next, make a null set of free lists */ 134 for (i = 0; i < BUFFER_QUEUES; i++) 135 TAILQ_INIT(&bufqueues[i]); 136 137 buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf); 138 /* finally, initialize each buffer header and stick on empty q */ 139 for (i = 0; i < nbuf; i++) { 140 bp = &buf[i]; 141 bzero(bp, sizeof *bp); 142 bp->b_flags = B_INVAL; /* we're just an empty header */ 143 bp->b_dev = NODEV; 144 bp->b_rcred = NOCRED; 145 bp->b_wcred = NOCRED; 146 bp->b_qindex = QUEUE_EMPTY; 147 bp->b_vnbufs.le_next = NOLIST; 148 bp->b_data = buffers_kva + i * MAXBSIZE; 149 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 150 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 151 } 152/* 153 * maxbufspace is currently calculated to support all filesystem blocks 154 * to be 8K. If you happen to use a 16K filesystem, the size of the buffer 155 * cache is still the same as it would be for 8K filesystems. This 156 * keeps the size of the buffer cache "in check" for big block filesystems. 157 */ 158 maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE; 159/* 160 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed 161 */ 162 maxvmiobufspace = 2 * maxbufspace / 3; 163/* 164 * Limit the amount of malloc memory since it is wired permanently into 165 * the kernel space. Even though this is accounted for in the buffer 166 * allocation, we don't want the malloced region to grow uncontrolled. 167 * The malloc scheme improves memory utilization significantly on average 168 * (small) directories. 169 */ 170 maxbufmallocspace = maxbufspace / 20; 171 172 bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE); 173 bogus_page = vm_page_alloc(kernel_object, 174 ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 175 VM_ALLOC_NORMAL); 176 177} 178 179/* 180 * remove the buffer from the appropriate free list 181 */ 182void 183bremfree(struct buf * bp) 184{ 185 int s = splbio(); 186 187 if (bp->b_qindex != QUEUE_NONE) { 188 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 189 bp->b_qindex = QUEUE_NONE; 190 } else { 191 panic("bremfree: removing a buffer when not on a queue"); 192 } 193 splx(s); 194} 195 196/* 197 * Get a buffer with the specified data. Look in the cache first. 198 */ 199int 200bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 201 struct buf ** bpp) 202{ 203 struct buf *bp; 204 205 bp = getblk(vp, blkno, size, 0, 0); 206 *bpp = bp; 207 208 /* if not found in cache, do some I/O */ 209 if ((bp->b_flags & B_CACHE) == 0) { 210 if (curproc != NULL) 211 curproc->p_stats->p_ru.ru_inblock++; 212 bp->b_flags |= B_READ; 213 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 214 if (bp->b_rcred == NOCRED) { 215 if (cred != NOCRED) 216 crhold(cred); 217 bp->b_rcred = cred; 218 } 219 vfs_busy_pages(bp, 0); 220 VOP_STRATEGY(bp); 221 return (biowait(bp)); 222 } 223 return (0); 224} 225 226/* 227 * Operates like bread, but also starts asynchronous I/O on 228 * read-ahead blocks. 229 */ 230int 231breadn(struct vnode * vp, daddr_t blkno, int size, 232 daddr_t * rablkno, int *rabsize, 233 int cnt, struct ucred * cred, struct buf ** bpp) 234{ 235 struct buf *bp, *rabp; 236 int i; 237 int rv = 0, readwait = 0; 238 239 *bpp = bp = getblk(vp, blkno, size, 0, 0); 240 241 /* if not found in cache, do some I/O */ 242 if ((bp->b_flags & B_CACHE) == 0) { 243 if (curproc != NULL) 244 curproc->p_stats->p_ru.ru_inblock++; 245 bp->b_flags |= B_READ; 246 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 247 if (bp->b_rcred == NOCRED) { 248 if (cred != NOCRED) 249 crhold(cred); 250 bp->b_rcred = cred; 251 } 252 vfs_busy_pages(bp, 0); 253 VOP_STRATEGY(bp); 254 ++readwait; 255 } 256 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 257 if (inmem(vp, *rablkno)) 258 continue; 259 rabp = getblk(vp, *rablkno, *rabsize, 0, 0); 260 261 if ((rabp->b_flags & B_CACHE) == 0) { 262 if (curproc != NULL) 263 curproc->p_stats->p_ru.ru_inblock++; 264 rabp->b_flags |= B_READ | B_ASYNC; 265 rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 266 if (rabp->b_rcred == NOCRED) { 267 if (cred != NOCRED) 268 crhold(cred); 269 rabp->b_rcred = cred; 270 } 271 vfs_busy_pages(rabp, 0); 272 VOP_STRATEGY(rabp); 273 } else { 274 brelse(rabp); 275 } 276 } 277 278 if (readwait) { 279 rv = biowait(bp); 280 } 281 return (rv); 282} 283 284/* 285 * Write, release buffer on completion. (Done by iodone 286 * if async.) 287 */ 288int 289bwrite(struct buf * bp) 290{ 291 int oldflags = bp->b_flags; 292 293 if (bp->b_flags & B_INVAL) { 294 brelse(bp); 295 return (0); 296 } 297 if (!(bp->b_flags & B_BUSY)) 298 panic("bwrite: buffer is not busy???"); 299 300 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI); 301 bp->b_flags |= B_WRITEINPROG; 302 303 if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) { 304 reassignbuf(bp, bp->b_vp); 305 } 306 307 bp->b_vp->v_numoutput++; 308 vfs_busy_pages(bp, 1); 309 if (curproc != NULL) 310 curproc->p_stats->p_ru.ru_oublock++; 311 VOP_STRATEGY(bp); 312 313 /* 314 * Handle ordered writes here. 315 * If the write was originally flagged as ordered, 316 * then we check to see if it was converted to async. 317 * If it was converted to async, and is done now, then 318 * we release the buffer. Otherwise we clear the 319 * ordered flag because it is not needed anymore. 320 * 321 * Note that biodone has been modified so that it does 322 * not release ordered buffers. This allows us to have 323 * a chance to determine whether or not the driver 324 * has set the async flag in the strategy routine. Otherwise 325 * if biodone was not modified, then the buffer may have been 326 * reused before we have had a chance to check the flag. 327 */ 328 329 if ((oldflags & B_ORDERED) == B_ORDERED) { 330 int s; 331 s = splbio(); 332 if (bp->b_flags & B_ASYNC) { 333 if ((bp->b_flags & B_DONE)) { 334 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 335 brelse(bp); 336 else 337 bqrelse(bp); 338 } 339 splx(s); 340 return (0); 341 } else { 342 bp->b_flags &= ~B_ORDERED; 343 } 344 splx(s); 345 } 346 347 if ((oldflags & B_ASYNC) == 0) { 348 int rtval = biowait(bp); 349 350 if (oldflags & B_DELWRI) { 351 reassignbuf(bp, bp->b_vp); 352 } 353 brelse(bp); 354 return (rtval); 355 } 356 return (0); 357} 358 359int 360vn_bwrite(ap) 361 struct vop_bwrite_args *ap; 362{ 363 return (bwrite(ap->a_bp)); 364} 365 366/* 367 * Delayed write. (Buffer is marked dirty). 368 */ 369void 370bdwrite(struct buf * bp) 371{ 372 373 if ((bp->b_flags & B_BUSY) == 0) { 374 panic("bdwrite: buffer is not busy"); 375 } 376 if (bp->b_flags & B_INVAL) { 377 brelse(bp); 378 return; 379 } 380 if (bp->b_flags & B_TAPE) { 381 bawrite(bp); 382 return; 383 } 384 bp->b_flags &= ~(B_READ|B_RELBUF); 385 if ((bp->b_flags & B_DELWRI) == 0) { 386 bp->b_flags |= B_DONE | B_DELWRI; 387 reassignbuf(bp, bp->b_vp); 388 } 389 390 /* 391 * This bmap keeps the system from needing to do the bmap later, 392 * perhaps when the system is attempting to do a sync. Since it 393 * is likely that the indirect block -- or whatever other datastructure 394 * that the filesystem needs is still in memory now, it is a good 395 * thing to do this. Note also, that if the pageout daemon is 396 * requesting a sync -- there might not be enough memory to do 397 * the bmap then... So, this is important to do. 398 */ 399 if( bp->b_lblkno == bp->b_blkno) { 400 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 401 } 402 403 /* 404 * Set the *dirty* buffer range based upon the VM system dirty pages. 405 */ 406 vfs_setdirty(bp); 407 408 /* 409 * We need to do this here to satisfy the vnode_pager and the 410 * pageout daemon, so that it thinks that the pages have been 411 * "cleaned". Note that since the pages are in a delayed write 412 * buffer -- the VFS layer "will" see that the pages get written 413 * out on the next sync, or perhaps the cluster will be completed. 414 */ 415 vfs_clean_pages(bp); 416 bqrelse(bp); 417 return; 418} 419 420/* 421 * Asynchronous write. 422 * Start output on a buffer, but do not wait for it to complete. 423 * The buffer is released when the output completes. 424 */ 425void 426bawrite(struct buf * bp) 427{ 428 bp->b_flags |= B_ASYNC; 429 (void) VOP_BWRITE(bp); 430} 431 432/* 433 * Ordered write. 434 * Start output on a buffer, but only wait for it to complete if the 435 * output device cannot guarantee ordering in some other way. Devices 436 * that can perform asynchronous ordered writes will set the B_ASYNC 437 * flag in their strategy routine. 438 * The buffer is released when the output completes. 439 */ 440int 441bowrite(struct buf * bp) 442{ 443 bp->b_flags |= B_ORDERED; 444 return (VOP_BWRITE(bp)); 445} 446 447/* 448 * Release a buffer. 449 */ 450void 451brelse(struct buf * bp) 452{ 453 int s; 454 455 if (bp->b_flags & B_CLUSTER) { 456 relpbuf(bp); 457 return; 458 } 459 /* anyone need a "free" block? */ 460 s = splbio(); 461 462 /* anyone need this block? */ 463 if (bp->b_flags & B_WANTED) { 464 bp->b_flags &= ~(B_WANTED | B_AGE); 465 wakeup(bp); 466 } 467 468 if (bp->b_flags & B_LOCKED) 469 bp->b_flags &= ~B_ERROR; 470 471 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) || 472 (bp->b_bufsize <= 0)) { 473 bp->b_flags |= B_INVAL; 474 bp->b_flags &= ~(B_DELWRI | B_CACHE); 475 if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) { 476 if (bp->b_bufsize) 477 allocbuf(bp, 0); 478 brelvp(bp); 479 } 480 } 481 482 /* 483 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 484 * constituted, so the B_INVAL flag is used to *invalidate* the buffer, 485 * but the VM object is kept around. The B_NOCACHE flag is used to 486 * invalidate the pages in the VM object. 487 */ 488 if (bp->b_flags & B_VMIO) { 489 vm_ooffset_t foff; 490 vm_object_t obj; 491 int i, resid; 492 vm_page_t m; 493 struct vnode *vp; 494 int iototal = bp->b_bufsize; 495 496 vp = bp->b_vp; 497 if (!vp) 498 panic("brelse: missing vp"); 499 500 if (bp->b_npages) { 501 vm_pindex_t poff; 502 obj = (vm_object_t) vp->v_object; 503 if (vp->v_type == VBLK) 504 foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT; 505 else 506 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 507 poff = OFF_TO_IDX(foff); 508 for (i = 0; i < bp->b_npages; i++) { 509 m = bp->b_pages[i]; 510 if (m == bogus_page) { 511 m = vm_page_lookup(obj, poff + i); 512 if (!m) { 513 panic("brelse: page missing\n"); 514 } 515 bp->b_pages[i] = m; 516 pmap_qenter(trunc_page(bp->b_data), 517 bp->b_pages, bp->b_npages); 518 } 519 resid = IDX_TO_OFF(m->pindex+1) - foff; 520 if (resid > iototal) 521 resid = iototal; 522 if (resid > 0) { 523 /* 524 * Don't invalidate the page if the local machine has already 525 * modified it. This is the lesser of two evils, and should 526 * be fixed. 527 */ 528 if (bp->b_flags & (B_NOCACHE | B_ERROR)) { 529 vm_page_test_dirty(m); 530 if (m->dirty == 0) { 531 vm_page_set_invalid(m, (vm_offset_t) foff, resid); 532 if (m->valid == 0) 533 vm_page_protect(m, VM_PROT_NONE); 534 } 535 } 536 if (resid >= PAGE_SIZE) { 537 if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { 538 bp->b_flags |= B_INVAL; 539 } 540 } else { 541 if (!vm_page_is_valid(m, 542 (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) { 543 bp->b_flags |= B_INVAL; 544 } 545 } 546 } 547 foff += resid; 548 iototal -= resid; 549 } 550 } 551 if (bp->b_flags & (B_INVAL | B_RELBUF)) 552 vfs_vmio_release(bp); 553 } 554 if (bp->b_qindex != QUEUE_NONE) 555 panic("brelse: free buffer onto another queue???"); 556 557 /* enqueue */ 558 /* buffers with no memory */ 559 if (bp->b_bufsize == 0) { 560 bp->b_qindex = QUEUE_EMPTY; 561 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 562 LIST_REMOVE(bp, b_hash); 563 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 564 bp->b_dev = NODEV; 565 if (needsbuffer) { 566 wakeup(&needsbuffer); 567 needsbuffer=0; 568 } 569 /* buffers with junk contents */ 570 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) { 571 bp->b_qindex = QUEUE_AGE; 572 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist); 573 LIST_REMOVE(bp, b_hash); 574 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 575 bp->b_dev = NODEV; 576 if (needsbuffer) { 577 wakeup(&needsbuffer); 578 needsbuffer=0; 579 } 580 /* buffers that are locked */ 581 } else if (bp->b_flags & B_LOCKED) { 582 bp->b_qindex = QUEUE_LOCKED; 583 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 584 /* buffers with stale but valid contents */ 585 } else if (bp->b_flags & B_AGE) { 586 bp->b_qindex = QUEUE_AGE; 587 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist); 588 if (needsbuffer) { 589 wakeup(&needsbuffer); 590 needsbuffer=0; 591 } 592 /* buffers with valid and quite potentially reuseable contents */ 593 } else { 594 bp->b_qindex = QUEUE_LRU; 595 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 596 if (needsbuffer) { 597 wakeup(&needsbuffer); 598 needsbuffer=0; 599 } 600 } 601 602 /* unlock */ 603 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 604 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 605 splx(s); 606} 607 608/* 609 * Release a buffer. 610 */ 611void 612bqrelse(struct buf * bp) 613{ 614 int s; 615 616 s = splbio(); 617 618 619 /* anyone need this block? */ 620 if (bp->b_flags & B_WANTED) { 621 bp->b_flags &= ~(B_WANTED | B_AGE); 622 wakeup(bp); 623 } 624 625 if (bp->b_qindex != QUEUE_NONE) 626 panic("bqrelse: free buffer onto another queue???"); 627 628 if (bp->b_flags & B_LOCKED) { 629 bp->b_flags &= ~B_ERROR; 630 bp->b_qindex = QUEUE_LOCKED; 631 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 632 /* buffers with stale but valid contents */ 633 } else { 634 bp->b_qindex = QUEUE_LRU; 635 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 636 if (needsbuffer) { 637 wakeup(&needsbuffer); 638 needsbuffer=0; 639 } 640 } 641 642 /* unlock */ 643 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 644 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 645 splx(s); 646} 647 648static void 649vfs_vmio_release(bp) 650 struct buf *bp; 651{ 652 int i; 653 vm_page_t m; 654 655 for (i = 0; i < bp->b_npages; i++) { 656 m = bp->b_pages[i]; 657 bp->b_pages[i] = NULL; 658 if ((bp->b_flags & B_ASYNC) == 0) { 659 while ((m->flags & PG_BUSY) || (m->busy != 0)) { 660 m->flags |= PG_WANTED; 661 tsleep(m, PVM, "vmiorl", 0); 662 } 663 } 664 665 vm_page_unwire(m); 666 667 if (m->wire_count == 0) { 668 669 if (m->flags & PG_WANTED) { 670 m->flags &= ~PG_WANTED; 671 wakeup(m); 672 } 673 674 if (bp->b_flags & B_ASYNC) { 675 if (m->hold_count == 0) { 676 if ((m->flags & PG_BUSY) == 0 && 677 (m->busy == 0) && 678 (m->valid == 0)) { 679 if(m->dirty == 0) 680 vm_page_test_dirty(m); 681 if (m->dirty == 0) { 682 vm_page_protect(m, VM_PROT_NONE); 683 vm_page_free(m); 684 } else { 685 pagedaemon_wakeup(); 686 } 687 /* 688 * This is likely at interrupt time, 689 * and we cannot block here. 690 */ 691 } else if (cnt.v_free_count < cnt.v_free_min) { 692 pagedaemon_wakeup(); 693 } 694 } 695 continue; 696 } 697 698 if (m->valid) { 699 if(m->dirty == 0) 700 vm_page_test_dirty(m); 701 /* 702 * this keeps pressure off of the process memory 703 */ 704 if ((vm_swap_size == 0) || 705 (cnt.v_free_count < cnt.v_free_min)) { 706 if ((m->dirty == 0) && 707 (m->hold_count == 0)) 708 vm_page_cache(m); 709 else 710 vm_page_deactivate(m); 711 } 712 } else if (m->hold_count == 0) { 713 vm_page_protect(m, VM_PROT_NONE); 714 vm_page_free(m); 715 } 716 } 717 } 718 bufspace -= bp->b_bufsize; 719 vmiospace -= bp->b_bufsize; 720 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 721 bp->b_npages = 0; 722 bp->b_bufsize = 0; 723 bp->b_flags &= ~B_VMIO; 724 if (bp->b_vp) 725 brelvp(bp); 726} 727 728/* 729 * Check to see if a block is currently memory resident. 730 */ 731__inline struct buf * 732gbincore(struct vnode * vp, daddr_t blkno) 733{ 734 struct buf *bp; 735 struct bufhashhdr *bh; 736 737 bh = BUFHASH(vp, blkno); 738 bp = bh->lh_first; 739 740 /* Search hash chain */ 741 while (bp != NULL) { 742 /* hit */ 743 if (bp->b_vp == vp && bp->b_lblkno == blkno && 744 (bp->b_flags & B_INVAL) == 0) { 745 break; 746 } 747 bp = bp->b_hash.le_next; 748 } 749 return (bp); 750} 751 752/* 753 * this routine implements clustered async writes for 754 * clearing out B_DELWRI buffers... This is much better 755 * than the old way of writing only one buffer at a time. 756 */ 757int 758vfs_bio_awrite(struct buf * bp) 759{ 760 int i; 761 daddr_t lblkno = bp->b_lblkno; 762 struct vnode *vp = bp->b_vp; 763 int s; 764 int ncl; 765 struct buf *bpa; 766 int nwritten; 767 768 s = splbio(); 769 /* 770 * right now we support clustered writing only to regular files 771 */ 772 if ((vp->v_type == VREG) && 773 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 774 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 775 int size; 776 int maxcl; 777 778 size = vp->v_mount->mnt_stat.f_iosize; 779 maxcl = MAXPHYS / size; 780 781 for (i = 1; i < maxcl; i++) { 782 if ((bpa = gbincore(vp, lblkno + i)) && 783 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == 784 (B_DELWRI | B_CLUSTEROK)) && 785 (bpa->b_bufsize == size)) { 786 if ((bpa->b_blkno == bpa->b_lblkno) || 787 (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 788 break; 789 } else { 790 break; 791 } 792 } 793 ncl = i; 794 /* 795 * this is a possible cluster write 796 */ 797 if (ncl != 1) { 798 nwritten = cluster_wbuild(vp, size, lblkno, ncl); 799 splx(s); 800 return nwritten; 801 } 802 } 803 bremfree(bp); 804 splx(s); 805 /* 806 * default (old) behavior, writing out only one block 807 */ 808 bp->b_flags |= B_BUSY | B_ASYNC; 809 nwritten = bp->b_bufsize; 810 (void) VOP_BWRITE(bp); 811 return nwritten; 812} 813 814 815/* 816 * Find a buffer header which is available for use. 817 */ 818static struct buf * 819getnewbuf(int slpflag, int slptimeo, int doingvmio) 820{ 821 struct buf *bp; 822 int nbyteswritten = 0; 823 824start: 825 if (bufspace >= maxbufspace) 826 goto trytofreespace; 827 828 /* can we constitute a new buffer? */ 829 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) { 830 if (bp->b_qindex != QUEUE_EMPTY) 831 panic("getnewbuf: inconsistent EMPTY queue, qindex=%d", 832 bp->b_qindex); 833 bp->b_flags |= B_BUSY; 834 bremfree(bp); 835 goto fillbuf; 836 } 837trytofreespace: 838 /* 839 * We keep the file I/O from hogging metadata I/O 840 * This is desirable because file data is cached in the 841 * VM/Buffer cache even if a buffer is freed. 842 */ 843 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) { 844 if (bp->b_qindex != QUEUE_AGE) 845 panic("getnewbuf: inconsistent AGE queue, qindex=%d", 846 bp->b_qindex); 847 } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) { 848 if (bp->b_qindex != QUEUE_LRU) 849 panic("getnewbuf: inconsistent LRU queue, qindex=%d", 850 bp->b_qindex); 851 } 852 if (!bp) { 853 /* wait for a free buffer of any kind */ 854 needsbuffer = 1; 855 tsleep(&needsbuffer, 856 (PRIBIO + 1) | slpflag, "newbuf", slptimeo); 857 return (0); 858 } 859 860#if defined(DIAGNOSTIC) 861 if (bp->b_flags & B_BUSY) { 862 panic("getnewbuf: busy buffer on free list\n"); 863 } 864#endif 865 866 /* 867 * We are fairly aggressive about freeing VMIO buffers, but since 868 * the buffering is intact without buffer headers, there is not 869 * much loss. We gain by maintaining non-VMIOed metadata in buffers. 870 */ 871 if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) { 872 if ((bp->b_flags & B_VMIO) == 0 || 873 (vmiospace < maxvmiobufspace)) { 874 --bp->b_usecount; 875 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 876 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 877 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 878 goto start; 879 } 880 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 881 } 882 } 883 884 /* if we are a delayed write, convert to an async write */ 885 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 886 nbyteswritten += vfs_bio_awrite(bp); 887 if (!slpflag && !slptimeo) { 888 return (0); 889 } 890 goto start; 891 } 892 893 if (bp->b_flags & B_WANTED) { 894 bp->b_flags &= ~B_WANTED; 895 wakeup(bp); 896 } 897 bremfree(bp); 898 bp->b_flags |= B_BUSY; 899 900 if (bp->b_flags & B_VMIO) { 901 bp->b_flags &= ~B_ASYNC; 902 vfs_vmio_release(bp); 903 } 904 905 if (bp->b_vp) 906 brelvp(bp); 907 908fillbuf: 909 /* we are not free, nor do we contain interesting data */ 910 if (bp->b_rcred != NOCRED) { 911 crfree(bp->b_rcred); 912 bp->b_rcred = NOCRED; 913 } 914 if (bp->b_wcred != NOCRED) { 915 crfree(bp->b_wcred); 916 bp->b_wcred = NOCRED; 917 } 918 919 LIST_REMOVE(bp, b_hash); 920 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 921 if (bp->b_bufsize) { 922 allocbuf(bp, 0); 923 } 924 bp->b_flags = B_BUSY; 925 bp->b_dev = NODEV; 926 bp->b_vp = NULL; 927 bp->b_blkno = bp->b_lblkno = 0; 928 bp->b_iodone = 0; 929 bp->b_error = 0; 930 bp->b_resid = 0; 931 bp->b_bcount = 0; 932 bp->b_npages = 0; 933 bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE; 934 bp->b_dirtyoff = bp->b_dirtyend = 0; 935 bp->b_validoff = bp->b_validend = 0; 936 bp->b_usecount = 4; 937 if (bufspace >= maxbufspace + nbyteswritten) { 938 bp->b_flags |= B_INVAL; 939 brelse(bp); 940 goto trytofreespace; 941 } 942 return (bp); 943} 944 945/* 946 * Check to see if a block is currently memory resident. 947 */ 948struct buf * 949incore(struct vnode * vp, daddr_t blkno) 950{ 951 struct buf *bp; 952 953 int s = splbio(); 954 bp = gbincore(vp, blkno); 955 splx(s); 956 return (bp); 957} 958 959/* 960 * Returns true if no I/O is needed to access the 961 * associated VM object. This is like incore except 962 * it also hunts around in the VM system for the data. 963 */ 964 965int 966inmem(struct vnode * vp, daddr_t blkno) 967{ 968 vm_object_t obj; 969 vm_offset_t toff, tinc; 970 vm_page_t m; 971 vm_ooffset_t off; 972 973 if (incore(vp, blkno)) 974 return 1; 975 if (vp->v_mount == NULL) 976 return 0; 977 if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0) 978 return 0; 979 980 obj = vp->v_object; 981 tinc = PAGE_SIZE; 982 if (tinc > vp->v_mount->mnt_stat.f_iosize) 983 tinc = vp->v_mount->mnt_stat.f_iosize; 984 off = blkno * vp->v_mount->mnt_stat.f_iosize; 985 986 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 987 988 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 989 if (!m) 990 return 0; 991 if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0) 992 return 0; 993 } 994 return 1; 995} 996 997/* 998 * now we set the dirty range for the buffer -- 999 * for NFS -- if the file is mapped and pages have 1000 * been written to, let it know. We want the 1001 * entire range of the buffer to be marked dirty if 1002 * any of the pages have been written to for consistancy 1003 * with the b_validoff, b_validend set in the nfs write 1004 * code, and used by the nfs read code. 1005 */ 1006static void 1007vfs_setdirty(struct buf *bp) { 1008 int i; 1009 vm_object_t object; 1010 vm_offset_t boffset, offset; 1011 /* 1012 * We qualify the scan for modified pages on whether the 1013 * object has been flushed yet. The OBJ_WRITEABLE flag 1014 * is not cleared simply by protecting pages off. 1015 */ 1016 if ((bp->b_flags & B_VMIO) && 1017 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) { 1018 /* 1019 * test the pages to see if they have been modified directly 1020 * by users through the VM system. 1021 */ 1022 for (i = 0; i < bp->b_npages; i++) 1023 vm_page_test_dirty(bp->b_pages[i]); 1024 1025 /* 1026 * scan forwards for the first page modified 1027 */ 1028 for (i = 0; i < bp->b_npages; i++) { 1029 if (bp->b_pages[i]->dirty) { 1030 break; 1031 } 1032 } 1033 boffset = (i << PAGE_SHIFT); 1034 if (boffset < bp->b_dirtyoff) { 1035 bp->b_dirtyoff = boffset; 1036 } 1037 1038 /* 1039 * scan backwards for the last page modified 1040 */ 1041 for (i = bp->b_npages - 1; i >= 0; --i) { 1042 if (bp->b_pages[i]->dirty) { 1043 break; 1044 } 1045 } 1046 boffset = (i + 1); 1047 offset = boffset + bp->b_pages[0]->pindex; 1048 if (offset >= object->size) 1049 boffset = object->size - bp->b_pages[0]->pindex; 1050 if (bp->b_dirtyend < (boffset << PAGE_SHIFT)) 1051 bp->b_dirtyend = (boffset << PAGE_SHIFT); 1052 } 1053} 1054 1055/* 1056 * Get a block given a specified block and offset into a file/device. 1057 */ 1058struct buf * 1059getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1060{ 1061 struct buf *bp; 1062 int s; 1063 struct bufhashhdr *bh; 1064 1065 s = splbio(); 1066loop: 1067 if ((bp = gbincore(vp, blkno))) { 1068 if (bp->b_flags & B_BUSY) { 1069 bp->b_flags |= B_WANTED; 1070 if (bp->b_usecount < BUF_MAXUSE) 1071 ++bp->b_usecount; 1072 if (!tsleep(bp, 1073 (PRIBIO + 1) | slpflag, "getblk", slptimeo)) 1074 goto loop; 1075 1076 splx(s); 1077 return (struct buf *) NULL; 1078 } 1079 bp->b_flags |= B_BUSY | B_CACHE; 1080 bremfree(bp); 1081 1082 /* 1083 * check for size inconsistancies (note that they shouldn't happen 1084 * but do when filesystems don't handle the size changes correctly.) 1085 * We are conservative on metadata and don't just extend the buffer 1086 * but write and re-constitute it. 1087 */ 1088 1089 if (bp->b_bcount != size) { 1090 if (bp->b_flags & B_VMIO) { 1091 allocbuf(bp, size); 1092 } else { 1093 bp->b_flags |= B_NOCACHE; 1094 VOP_BWRITE(bp); 1095 goto loop; 1096 } 1097 } 1098 1099 if (bp->b_usecount < BUF_MAXUSE) 1100 ++bp->b_usecount; 1101 splx(s); 1102 return (bp); 1103 } else { 1104 vm_object_t obj; 1105 int doingvmio; 1106 1107 if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) { 1108 doingvmio = 1; 1109 } else { 1110 doingvmio = 0; 1111 } 1112 if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) { 1113 if (slpflag || slptimeo) { 1114 splx(s); 1115 return NULL; 1116 } 1117 goto loop; 1118 } 1119 1120 /* 1121 * This code is used to make sure that a buffer is not 1122 * created while the getnewbuf routine is blocked. 1123 * Normally the vnode is locked so this isn't a problem. 1124 * VBLK type I/O requests, however, don't lock the vnode. 1125 */ 1126 if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) { 1127 bp->b_flags |= B_INVAL; 1128 brelse(bp); 1129 goto loop; 1130 } 1131 1132 /* 1133 * Insert the buffer into the hash, so that it can 1134 * be found by incore. 1135 */ 1136 bp->b_blkno = bp->b_lblkno = blkno; 1137 bgetvp(vp, bp); 1138 LIST_REMOVE(bp, b_hash); 1139 bh = BUFHASH(vp, blkno); 1140 LIST_INSERT_HEAD(bh, bp, b_hash); 1141 1142 if (doingvmio) { 1143 bp->b_flags |= (B_VMIO | B_CACHE); 1144#if defined(VFS_BIO_DEBUG) 1145 if (vp->v_type != VREG && vp->v_type != VBLK) 1146 printf("getblk: vmioing file type %d???\n", vp->v_type); 1147#endif 1148 } else { 1149 bp->b_flags &= ~B_VMIO; 1150 } 1151 splx(s); 1152 1153 allocbuf(bp, size); 1154#ifdef PC98 1155 /* 1156 * 1024byte/sector support 1157 */ 1158#define B_XXX2 0x8000000 1159 if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2; 1160#endif 1161 return (bp); 1162 } 1163} 1164 1165/* 1166 * Get an empty, disassociated buffer of given size. 1167 */ 1168struct buf * 1169geteblk(int size) 1170{ 1171 struct buf *bp; 1172 int s; 1173 1174 s = splbio(); 1175 while ((bp = getnewbuf(0, 0, 0)) == 0); 1176 splx(s); 1177 allocbuf(bp, size); 1178 bp->b_flags |= B_INVAL; 1179 return (bp); 1180} 1181 1182 1183/* 1184 * This code constitutes the buffer memory from either anonymous system 1185 * memory (in the case of non-VMIO operations) or from an associated 1186 * VM object (in the case of VMIO operations). 1187 * 1188 * Note that this code is tricky, and has many complications to resolve 1189 * deadlock or inconsistant data situations. Tread lightly!!! 1190 * 1191 * Modify the length of a buffer's underlying buffer storage without 1192 * destroying information (unless, of course the buffer is shrinking). 1193 */ 1194int 1195allocbuf(struct buf * bp, int size) 1196{ 1197 1198 int s; 1199 int newbsize, mbsize; 1200 int i; 1201 1202 if (!(bp->b_flags & B_BUSY)) 1203 panic("allocbuf: buffer not busy"); 1204 1205 if ((bp->b_flags & B_VMIO) == 0) { 1206 caddr_t origbuf; 1207 int origbufsize; 1208 /* 1209 * Just get anonymous memory from the kernel 1210 */ 1211 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1212#if !defined(NO_B_MALLOC) 1213 if (bp->b_flags & B_MALLOC) 1214 newbsize = mbsize; 1215 else 1216#endif 1217 newbsize = round_page(size); 1218 1219 if (newbsize < bp->b_bufsize) { 1220#if !defined(NO_B_MALLOC) 1221 /* 1222 * malloced buffers are not shrunk 1223 */ 1224 if (bp->b_flags & B_MALLOC) { 1225 if (newbsize) { 1226 bp->b_bcount = size; 1227 } else { 1228 free(bp->b_data, M_BIOBUF); 1229 bufspace -= bp->b_bufsize; 1230 bufmallocspace -= bp->b_bufsize; 1231 bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE; 1232 bp->b_bufsize = 0; 1233 bp->b_bcount = 0; 1234 bp->b_flags &= ~B_MALLOC; 1235 } 1236 return 1; 1237 } 1238#endif 1239 vm_hold_free_pages( 1240 bp, 1241 (vm_offset_t) bp->b_data + newbsize, 1242 (vm_offset_t) bp->b_data + bp->b_bufsize); 1243 } else if (newbsize > bp->b_bufsize) { 1244#if !defined(NO_B_MALLOC) 1245 /* 1246 * We only use malloced memory on the first allocation. 1247 * and revert to page-allocated memory when the buffer grows. 1248 */ 1249 if ( (bufmallocspace < maxbufmallocspace) && 1250 (bp->b_bufsize == 0) && 1251 (mbsize <= PAGE_SIZE/2)) { 1252 1253 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 1254 bp->b_bufsize = mbsize; 1255 bp->b_bcount = size; 1256 bp->b_flags |= B_MALLOC; 1257 bufspace += mbsize; 1258 bufmallocspace += mbsize; 1259 return 1; 1260 } 1261#endif 1262 origbuf = NULL; 1263 origbufsize = 0; 1264#if !defined(NO_B_MALLOC) 1265 /* 1266 * If the buffer is growing on it's other-than-first allocation, 1267 * then we revert to the page-allocation scheme. 1268 */ 1269 if (bp->b_flags & B_MALLOC) { 1270 origbuf = bp->b_data; 1271 origbufsize = bp->b_bufsize; 1272 bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE; 1273 bufspace -= bp->b_bufsize; 1274 bufmallocspace -= bp->b_bufsize; 1275 bp->b_bufsize = 0; 1276 bp->b_flags &= ~B_MALLOC; 1277 newbsize = round_page(newbsize); 1278 } 1279#endif 1280 vm_hold_load_pages( 1281 bp, 1282 (vm_offset_t) bp->b_data + bp->b_bufsize, 1283 (vm_offset_t) bp->b_data + newbsize); 1284#if !defined(NO_B_MALLOC) 1285 if (origbuf) { 1286 bcopy(origbuf, bp->b_data, origbufsize); 1287 free(origbuf, M_BIOBUF); 1288 } 1289#endif 1290 } 1291 } else { 1292 vm_page_t m; 1293 int desiredpages; 1294 1295 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1296 desiredpages = (round_page(newbsize) >> PAGE_SHIFT); 1297 1298#if !defined(NO_B_MALLOC) 1299 if (bp->b_flags & B_MALLOC) 1300 panic("allocbuf: VMIO buffer can't be malloced"); 1301#endif 1302 1303 if (newbsize < bp->b_bufsize) { 1304 if (desiredpages < bp->b_npages) { 1305 for (i = desiredpages; i < bp->b_npages; i++) { 1306 /* 1307 * the page is not freed here -- it 1308 * is the responsibility of vnode_pager_setsize 1309 */ 1310 m = bp->b_pages[i]; 1311#if defined(DIAGNOSTIC) 1312 if (m == bogus_page) 1313 panic("allocbuf: bogus page found"); 1314#endif 1315 s = splvm(); 1316 while ((m->flags & PG_BUSY) || (m->busy != 0)) { 1317 m->flags |= PG_WANTED; 1318 tsleep(m, PVM, "biodep", 0); 1319 } 1320 splx(s); 1321 1322 bp->b_pages[i] = NULL; 1323 vm_page_unwire(m); 1324 } 1325 pmap_qremove((vm_offset_t) trunc_page(bp->b_data) + 1326 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 1327 bp->b_npages = desiredpages; 1328 } 1329 } else if (newbsize > bp->b_bufsize) { 1330 vm_object_t obj; 1331 vm_offset_t tinc, toff; 1332 vm_ooffset_t off; 1333 vm_pindex_t objoff; 1334 int pageindex, curbpnpages; 1335 struct vnode *vp; 1336 int bsize; 1337 1338 vp = bp->b_vp; 1339 1340 if (vp->v_type == VBLK) 1341 bsize = DEV_BSIZE; 1342 else 1343 bsize = vp->v_mount->mnt_stat.f_iosize; 1344 1345 if (bp->b_npages < desiredpages) { 1346 obj = vp->v_object; 1347 tinc = PAGE_SIZE; 1348 if (tinc > bsize) 1349 tinc = bsize; 1350 off = (vm_ooffset_t) bp->b_lblkno * bsize; 1351 curbpnpages = bp->b_npages; 1352 doretry: 1353 bp->b_flags |= B_CACHE; 1354 for (toff = 0; toff < newbsize; toff += tinc) { 1355 int bytesinpage; 1356 1357 pageindex = toff >> PAGE_SHIFT; 1358 objoff = OFF_TO_IDX(off + toff); 1359 if (pageindex < curbpnpages) { 1360 1361 m = bp->b_pages[pageindex]; 1362#ifdef VFS_BIO_DIAG 1363 if (m->pindex != objoff) 1364 panic("allocbuf: page changed offset??!!!?"); 1365#endif 1366 bytesinpage = tinc; 1367 if (tinc > (newbsize - toff)) 1368 bytesinpage = newbsize - toff; 1369 if ((bp->b_flags & B_CACHE) && 1370 !vm_page_is_valid(m, 1371 (vm_offset_t) ((toff + off) & PAGE_MASK), 1372 bytesinpage)) { 1373 bp->b_flags &= ~B_CACHE; 1374 } 1375 continue; 1376 } 1377 m = vm_page_lookup(obj, objoff); 1378 if (!m) { 1379 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 1380 if (!m) { 1381 VM_WAIT; 1382 goto doretry; 1383 } 1384 /* 1385 * Normally it is unwise to clear PG_BUSY without 1386 * PAGE_WAKEUP -- but it is okay here, as there is 1387 * no chance for blocking between here and vm_page_alloc 1388 */ 1389 m->flags &= ~PG_BUSY; 1390 vm_page_wire(m); 1391 bp->b_flags &= ~B_CACHE; 1392 } else if (m->flags & PG_BUSY) { 1393 s = splvm(); 1394 if (m->flags & PG_BUSY) { 1395 m->flags |= PG_WANTED; 1396 tsleep(m, PVM, "pgtblk", 0); 1397 } 1398 splx(s); 1399 goto doretry; 1400 } else { 1401 if ((curproc != pageproc) && 1402 ((m->queue - m->pc) == PQ_CACHE) && 1403 ((cnt.v_free_count + cnt.v_cache_count) < 1404 (cnt.v_free_min + cnt.v_cache_min))) { 1405 pagedaemon_wakeup(); 1406 } 1407 bytesinpage = tinc; 1408 if (tinc > (newbsize - toff)) 1409 bytesinpage = newbsize - toff; 1410 if ((bp->b_flags & B_CACHE) && 1411 !vm_page_is_valid(m, 1412 (vm_offset_t) ((toff + off) & PAGE_MASK), 1413 bytesinpage)) { 1414 bp->b_flags &= ~B_CACHE; 1415 } 1416 vm_page_wire(m); 1417 } 1418 bp->b_pages[pageindex] = m; 1419 curbpnpages = pageindex + 1; 1420 } 1421 bp->b_data = (caddr_t) trunc_page(bp->b_data); 1422 bp->b_npages = curbpnpages; 1423 pmap_qenter((vm_offset_t) bp->b_data, 1424 bp->b_pages, bp->b_npages); 1425 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK; 1426 } 1427 } 1428 } 1429 if (bp->b_flags & B_VMIO) 1430 vmiospace += bp->b_bufsize; 1431 bufspace += (newbsize - bp->b_bufsize); 1432 bp->b_bufsize = newbsize; 1433 bp->b_bcount = size; 1434 return 1; 1435} 1436 1437/* 1438 * Wait for buffer I/O completion, returning error status. 1439 */ 1440int 1441biowait(register struct buf * bp) 1442{ 1443 int s; 1444 1445 s = splbio(); 1446 while ((bp->b_flags & B_DONE) == 0) 1447 tsleep(bp, PRIBIO, "biowait", 0); 1448 splx(s); 1449 if (bp->b_flags & B_EINTR) { 1450 bp->b_flags &= ~B_EINTR; 1451 return (EINTR); 1452 } 1453 if (bp->b_flags & B_ERROR) { 1454 return (bp->b_error ? bp->b_error : EIO); 1455 } else { 1456 return (0); 1457 } 1458} 1459 1460/* 1461 * Finish I/O on a buffer, calling an optional function. 1462 * This is usually called from interrupt level, so process blocking 1463 * is not *a good idea*. 1464 */ 1465void 1466biodone(register struct buf * bp) 1467{ 1468 int s; 1469 1470 s = splbio(); 1471 if (!(bp->b_flags & B_BUSY)) 1472 panic("biodone: buffer not busy"); 1473 1474 if (bp->b_flags & B_DONE) { 1475 splx(s); 1476 printf("biodone: buffer already done\n"); 1477 return; 1478 } 1479 bp->b_flags |= B_DONE; 1480 1481 if ((bp->b_flags & B_READ) == 0) { 1482 vwakeup(bp); 1483 } 1484#ifdef BOUNCE_BUFFERS 1485 if (bp->b_flags & B_BOUNCE) 1486 vm_bounce_free(bp); 1487#endif 1488 1489 /* call optional completion function if requested */ 1490 if (bp->b_flags & B_CALL) { 1491 bp->b_flags &= ~B_CALL; 1492 (*bp->b_iodone) (bp); 1493 splx(s); 1494 return; 1495 } 1496 if (bp->b_flags & B_VMIO) { 1497 int i, resid; 1498 vm_ooffset_t foff; 1499 vm_page_t m; 1500 vm_object_t obj; 1501 int iosize; 1502 struct vnode *vp = bp->b_vp; 1503 1504 if (vp->v_type == VBLK) 1505 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1506 else 1507 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1508 obj = vp->v_object; 1509 if (!obj) { 1510 panic("biodone: no object"); 1511 } 1512#if defined(VFS_BIO_DEBUG) 1513 if (obj->paging_in_progress < bp->b_npages) { 1514 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1515 obj->paging_in_progress, bp->b_npages); 1516 } 1517#endif 1518 iosize = bp->b_bufsize; 1519 for (i = 0; i < bp->b_npages; i++) { 1520 int bogusflag = 0; 1521 m = bp->b_pages[i]; 1522 if (m == bogus_page) { 1523 bogusflag = 1; 1524 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 1525 if (!m) { 1526#if defined(VFS_BIO_DEBUG) 1527 printf("biodone: page disappeared\n"); 1528#endif 1529 --obj->paging_in_progress; 1530 continue; 1531 } 1532 bp->b_pages[i] = m; 1533 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1534 } 1535#if defined(VFS_BIO_DEBUG) 1536 if (OFF_TO_IDX(foff) != m->pindex) { 1537 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex); 1538 } 1539#endif 1540 resid = IDX_TO_OFF(m->pindex + 1) - foff; 1541 if (resid > iosize) 1542 resid = iosize; 1543 /* 1544 * In the write case, the valid and clean bits are 1545 * already changed correctly, so we only need to do this 1546 * here in the read case. 1547 */ 1548 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 1549 vm_page_set_validclean(m, 1550 (vm_offset_t) (foff & PAGE_MASK), resid); 1551 } 1552 1553 /* 1554 * when debugging new filesystems or buffer I/O methods, this 1555 * is the most common error that pops up. if you see this, you 1556 * have not set the page busy flag correctly!!! 1557 */ 1558 if (m->busy == 0) { 1559 printf("biodone: page busy < 0, " 1560 "pindex: %d, foff: 0x(%x,%x), " 1561 "resid: %d, index: %d\n", 1562 (int) m->pindex, (int)(foff >> 32), 1563 (int) foff & 0xffffffff, resid, i); 1564 if (vp->v_type != VBLK) 1565 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 1566 bp->b_vp->v_mount->mnt_stat.f_iosize, 1567 (int) bp->b_lblkno, 1568 bp->b_flags, bp->b_npages); 1569 else 1570 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 1571 (int) bp->b_lblkno, 1572 bp->b_flags, bp->b_npages); 1573 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 1574 m->valid, m->dirty, m->wire_count); 1575 panic("biodone: page busy < 0\n"); 1576 } 1577 --m->busy; 1578 if ((m->busy == 0) && (m->flags & PG_WANTED)) { 1579 m->flags &= ~PG_WANTED; 1580 wakeup(m); 1581 } 1582 --obj->paging_in_progress; 1583 foff += resid; 1584 iosize -= resid; 1585 } 1586 if (obj && obj->paging_in_progress == 0 && 1587 (obj->flags & OBJ_PIPWNT)) { 1588 obj->flags &= ~OBJ_PIPWNT; 1589 wakeup(obj); 1590 } 1591 } 1592 /* 1593 * For asynchronous completions, release the buffer now. The brelse 1594 * checks for B_WANTED and will do the wakeup there if necessary - so 1595 * no need to do a wakeup here in the async case. 1596 */ 1597 1598 if (bp->b_flags & B_ASYNC) { 1599 if ((bp->b_flags & B_ORDERED) == 0) { 1600 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 1601 brelse(bp); 1602 else 1603 bqrelse(bp); 1604 } 1605 } else { 1606 bp->b_flags &= ~B_WANTED; 1607 wakeup(bp); 1608 } 1609 splx(s); 1610} 1611 1612int 1613count_lock_queue() 1614{ 1615 int count; 1616 struct buf *bp; 1617 1618 count = 0; 1619 for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]); 1620 bp != NULL; 1621 bp = TAILQ_NEXT(bp, b_freelist)) 1622 count++; 1623 return (count); 1624} 1625 1626int vfs_update_interval = 30; 1627 1628static void 1629vfs_update() 1630{ 1631 (void) spl0(); /* XXX redundant? wrong place? */ 1632 while (1) { 1633 tsleep(&vfs_update_wakeup, PUSER, "update", 1634 hz * vfs_update_interval); 1635 vfs_update_wakeup = 0; 1636 sync(curproc, NULL, NULL); 1637 } 1638} 1639 1640static int 1641sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS 1642{ 1643 int error = sysctl_handle_int(oidp, 1644 oidp->oid_arg1, oidp->oid_arg2, req); 1645 if (!error) 1646 wakeup(&vfs_update_wakeup); 1647 return error; 1648} 1649 1650SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW, 1651 &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", ""); 1652 1653 1654/* 1655 * This routine is called in lieu of iodone in the case of 1656 * incomplete I/O. This keeps the busy status for pages 1657 * consistant. 1658 */ 1659void 1660vfs_unbusy_pages(struct buf * bp) 1661{ 1662 int i; 1663 1664 if (bp->b_flags & B_VMIO) { 1665 struct vnode *vp = bp->b_vp; 1666 vm_object_t obj = vp->v_object; 1667 vm_ooffset_t foff; 1668 1669 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1670 1671 for (i = 0; i < bp->b_npages; i++) { 1672 vm_page_t m = bp->b_pages[i]; 1673 1674 if (m == bogus_page) { 1675 m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i); 1676 if (!m) { 1677 panic("vfs_unbusy_pages: page missing\n"); 1678 } 1679 bp->b_pages[i] = m; 1680 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1681 } 1682 --obj->paging_in_progress; 1683 --m->busy; 1684 if ((m->busy == 0) && (m->flags & PG_WANTED)) { 1685 m->flags &= ~PG_WANTED; 1686 wakeup(m); 1687 } 1688 } 1689 if (obj->paging_in_progress == 0 && 1690 (obj->flags & OBJ_PIPWNT)) { 1691 obj->flags &= ~OBJ_PIPWNT; 1692 wakeup(obj); 1693 } 1694 } 1695} 1696 1697/* 1698 * This routine is called before a device strategy routine. 1699 * It is used to tell the VM system that paging I/O is in 1700 * progress, and treat the pages associated with the buffer 1701 * almost as being PG_BUSY. Also the object paging_in_progress 1702 * flag is handled to make sure that the object doesn't become 1703 * inconsistant. 1704 */ 1705void 1706vfs_busy_pages(struct buf * bp, int clear_modify) 1707{ 1708 int i; 1709 1710 if (bp->b_flags & B_VMIO) { 1711 vm_object_t obj = bp->b_vp->v_object; 1712 vm_ooffset_t foff; 1713 int iocount = bp->b_bufsize; 1714 1715 if (bp->b_vp->v_type == VBLK) 1716 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1717 else 1718 foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1719 vfs_setdirty(bp); 1720 for (i = 0; i < bp->b_npages; i++) { 1721 vm_page_t m = bp->b_pages[i]; 1722 int resid = IDX_TO_OFF(m->pindex + 1) - foff; 1723 1724 if (resid > iocount) 1725 resid = iocount; 1726 if ((bp->b_flags & B_CLUSTER) == 0) { 1727 obj->paging_in_progress++; 1728 m->busy++; 1729 } 1730 vm_page_protect(m, VM_PROT_NONE); 1731 if (clear_modify) { 1732 vm_page_set_validclean(m, 1733 (vm_offset_t) (foff & PAGE_MASK), resid); 1734 } else if (bp->b_bcount >= PAGE_SIZE) { 1735 if (m->valid && (bp->b_flags & B_CACHE) == 0) { 1736 bp->b_pages[i] = bogus_page; 1737 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1738 } 1739 } 1740 foff += resid; 1741 iocount -= resid; 1742 } 1743 } 1744} 1745 1746/* 1747 * Tell the VM system that the pages associated with this buffer 1748 * are clean. This is used for delayed writes where the data is 1749 * going to go to disk eventually without additional VM intevention. 1750 */ 1751void 1752vfs_clean_pages(struct buf * bp) 1753{ 1754 int i; 1755 1756 if (bp->b_flags & B_VMIO) { 1757 vm_ooffset_t foff; 1758 int iocount = bp->b_bufsize; 1759 1760 if (bp->b_vp->v_type == VBLK) 1761 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1762 else 1763 foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1764 1765 for (i = 0; i < bp->b_npages; i++) { 1766 vm_page_t m = bp->b_pages[i]; 1767 int resid = IDX_TO_OFF(m->pindex + 1) - foff; 1768 1769 if (resid > iocount) 1770 resid = iocount; 1771 if (resid > 0) { 1772 vm_page_set_validclean(m, 1773 ((vm_offset_t) foff & PAGE_MASK), resid); 1774 } 1775 foff += resid; 1776 iocount -= resid; 1777 } 1778 } 1779} 1780 1781void 1782vfs_bio_clrbuf(struct buf *bp) { 1783 int i; 1784 if( bp->b_flags & B_VMIO) { 1785 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) { 1786 int mask; 1787 mask = 0; 1788 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE) 1789 mask |= (1 << (i/DEV_BSIZE)); 1790 if( bp->b_pages[0]->valid != mask) { 1791 bzero(bp->b_data, bp->b_bufsize); 1792 } 1793 bp->b_pages[0]->valid = mask; 1794 bp->b_resid = 0; 1795 return; 1796 } 1797 for(i=0;i<bp->b_npages;i++) { 1798 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL) 1799 continue; 1800 if( bp->b_pages[i]->valid == 0) { 1801 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 1802 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE); 1803 } 1804 } else { 1805 int j; 1806 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) { 1807 if( (bp->b_pages[i]->valid & (1<<j)) == 0) 1808 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE); 1809 } 1810 } 1811 /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */ 1812 } 1813 bp->b_resid = 0; 1814 } else { 1815 clrbuf(bp); 1816 } 1817} 1818 1819/* 1820 * vm_hold_load_pages and vm_hold_unload pages get pages into 1821 * a buffers address space. The pages are anonymous and are 1822 * not associated with a file object. 1823 */ 1824void 1825vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1826{ 1827 vm_offset_t pg; 1828 vm_page_t p; 1829 int index; 1830 1831 to = round_page(to); 1832 from = round_page(from); 1833 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1834 1835 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1836 1837tryagain: 1838 1839 p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 1840 VM_ALLOC_NORMAL); 1841 if (!p) { 1842 VM_WAIT; 1843 goto tryagain; 1844 } 1845 vm_page_wire(p); 1846 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 1847 bp->b_pages[index] = p; 1848 PAGE_WAKEUP(p); 1849 } 1850 bp->b_npages = to >> PAGE_SHIFT; 1851} 1852 1853void 1854vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1855{ 1856 vm_offset_t pg; 1857 vm_page_t p; 1858 int index; 1859 1860 from = round_page(from); 1861 to = round_page(to); 1862 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1863 1864 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1865 p = bp->b_pages[index]; 1866 if (p && (index < bp->b_npages)) { 1867 if (p->busy) { 1868 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 1869 bp->b_blkno, bp->b_lblkno); 1870 } 1871 bp->b_pages[index] = NULL; 1872 pmap_kremove(pg); 1873 vm_page_unwire(p); 1874 vm_page_free(p); 1875 } 1876 } 1877 bp->b_npages = from >> PAGE_SHIFT; 1878} 1879