vfs_bio.c revision 18070
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.96 1996/08/21 21:55:18 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;
/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8
/*
#define NO_B_MALLOC
*/

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}
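/*
 * Worked example of the sizing above (illustrative only; the actual
 * values depend on the configured nbuf and PAGE_SIZE):  with nbuf =
 * 1024 and PAGE_SIZE = 4096, maxbufspace = 2 * (1024 + 8) * 4096 =
 * 8,454,144 bytes, maxvmiobufspace = 2/3 of that = 5,636,096 bytes,
 * and maxbufmallocspace = 1/20 of maxbufspace = 422,707 bytes.
 */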
/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((bp->b_flags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}
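/*
 * Typical read-modify-write pattern for the routines above (an
 * illustrative sketch, not code from this file; "vp", "blkno" and
 * "bsize" stand for hypothetical caller state):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, blkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify bp->b_data ...
 *	error = bwrite(bp);		(or bdwrite(bp) to defer the write)
 */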
/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}
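/*
 * In summary, the write entry points above differ only in when the
 * I/O is issued and waited for:  bwrite() starts the write and sleeps
 * for completion; bdwrite() just marks the buffer B_DELWRI so the
 * update daemon (or a later sync) writes it; bawrite() starts the
 * write but returns immediately; bowrite() requests ordering
 * (B_ORDERED) and waits only if the device cannot order the write
 * asynchronously itself.
 */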
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
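/*
 * To summarize the enqueue policy above:  headerless buffers go to
 * QUEUE_EMPTY; buffers with junk contents go to the head of QUEUE_AGE
 * so they are reused first; B_LOCKED buffers go to QUEUE_LOCKED;
 * B_AGE buffers go to the tail of QUEUE_AGE; and everything else --
 * valid, potentially reusable data -- goes to the tail of QUEUE_LRU,
 * to be reused last.
 */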
/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if (m->flags & PG_WANTED) {
			m->flags &= ~PG_WANTED;
			wakeup(m);
		}
		vm_page_unwire(m);
		if (m->wire_count == 0 && (m->flags & PG_BUSY) == 0) {
			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) &&
					    (m->hold_count == 0) &&
					    (m->flags & PG_BUSY) == 0 &&
					    (m->busy == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if ((m->hold_count == 0) &&
			    ((m->flags & PG_BUSY) == 0) &&
			    (m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
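/*
 * Worked example of the contiguity test above (illustrative numbers):
 * with an 8K filesystem block (size = 8192) and DEV_BSIZE = 512,
 * (i * size) >> DEV_BSHIFT is i * 16, so logical block lblkno + i only
 * joins the cluster when its disk address is exactly i * 16 sectors
 * past bp->b_blkno -- that is, when the blocks are physically
 * contiguous on the device.
 */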
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}
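/*
 * For example (illustrative):  in a 4-page buffer where only pages 1
 * and 2 test dirty, the forward scan stops at i = 1 and the backward
 * scan at i = 2, so the routine widens the dirty range to cover those
 * pages in full -- b_dirtyoff is pulled down to at most 1 << PAGE_SHIFT
 * and b_dirtyend pushed up to at least 3 << PAGE_SHIFT.
 */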
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2	0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
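/*
 * A minimal usage sketch for geteblk() (illustrative only):  grab an
 * anonymous buffer, use its storage as scratch space, then release it.
 * Because geteblk() marks the buffer B_INVAL, brelse() recycles it
 * immediately instead of caching the contents.
 *
 *	struct buf *bp = geteblk(4096);
 *	bzero(bp->b_data, 4096);
 *	... use bp->b_data ...
 *	brelse(bp);
 */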
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
	doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->queue == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
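/*
 * The two completion styles supported by biowait()/biodone() below
 * (an illustrative sketch; "mydone" is a hypothetical callback):
 *
 * Synchronous -- the caller starts the I/O and sleeps:
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 *
 * Asynchronous -- the caller sets B_CALL and a b_iodone function,
 * and biodone() invokes it from interrupt level when the I/O ends:
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */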
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant? wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
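/*
 * The sync interval can thus be tuned at run time through the sysctl
 * declared above, e.g. from userland (illustrative):
 *
 *	sysctl -w kern.update=60
 *
 * The handler wakes the update daemon on a successful set, so a new
 * interval takes effect immediately rather than after the old timeout
 * expires.
 */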
/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
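/*
 * Putting the two routines together, the page-busy protocol around an
 * I/O is, in summary:  vfs_busy_pages() is called just before
 * VOP_STRATEGY() to bump m->busy and obj->paging_in_progress for each
 * page; on completion, biodone() (or vfs_unbusy_pages(), if the I/O
 * was never issued) decrements the same counts and wakes any waiters.
 */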
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages gets anonymous pages into a buffer's address
 * space, and vm_hold_free_pages releases them.  The pages are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}