vfs_bio.c revision 18358
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.100 1996/09/14 04:40:33 dyson Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;
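
/*
 * How bogus_page is used (a summary of the mechanism below): when a
 * buffer is sent down for a read but some of its pages are already
 * valid, vfs_busy_pages() substitutes bogus_page for those pages so
 * the transfer cannot clobber them; biodone() and vfs_unbusy_pages()
 * then look the real pages back up in the VM object and re-enter them
 * into the buffer's KVA before completion is reported.
 */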

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8
/*
#define NO_B_MALLOC
*/

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * being 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}
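
/*
 * Typical read-path usage of the routines below (a sketch, not code
 * taken from this file): a filesystem read would do roughly
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	bqrelse(bp);
 */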

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * It is possible that the buffer is reused
	 * before this point if B_ASYNC...  What to do?
	 */

	/* if ((bp->b_flags & B_ASYNC) == 0) { */
	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}
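
/*
 * The write interfaces below differ mainly in when the caller gets the
 * buffer back: bwrite() waits for the transfer unless B_ASYNC is set,
 * bdwrite() just marks the buffer B_DELWRI and requeues it for a later
 * sync, bawrite() sets B_ASYNC and starts the write immediately, and
 * bowrite() sets B_ORDERED to ask the driver to preserve write ordering.
 */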

/*
 * Delayed write.  (Buffer is marked dirty.)
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a
	 * VMIO buffer constituted, so the B_INVAL flag is used to
	 * *invalidate* the buffer, but the VM object is kept around.
	 * The B_NOCACHE flag is used to invalidate the pages in the VM
	 * object.
	 */
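	/*
	 * The loop below walks the buffer's pages; for each page, resid
	 * is the number of bytes of the buffer that fall within that
	 * page, so foff and iototal advance through the buffer one
	 * page's worth at a time.
	 */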
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex + 1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
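
/*
 * bqrelse() is the quick-release path: unlike brelse() it never
 * invalidates the buffer or tears down its VMIO pages, so it is the
 * cheap way to give back a buffer whose contents remain valid.
 */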

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();


	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;

	for (i = 0; i < bp->b_npages; i++) {
		int s;
		vm_page_t m;

		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;

		s = splbio();
		while ((m->flags & PG_BUSY) || (m->busy != 0)) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "vmiorl", 0);
		}
		splx(s);

		vm_page_unwire(m);

		if (m->wire_count == 0) {
			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) && (m->hold_count == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if (m->hold_count == 0) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
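
/*
 * A cluster can be built below only from buffers that are logically
 * contiguous with bp, are delayed writes marked B_CLUSTEROK, are the
 * same (filesystem block) size, and map physically contiguous disk
 * blocks; the scan stops at the first buffer that fails any of these
 * tests.
 */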

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&	/* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
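	/*
	 * A recently used LRU buffer gets a second chance: its usecount
	 * is decremented and it is moved to the tail of the LRU queue,
	 * and the search restarts from the top.  VMIO buffers lose this
	 * reprieve once vmiospace exceeds maxvmiobufspace.
	 */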
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
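
/*
 * Because inmem() checks validity page by page in the VM object, it
 * can report a hit even when no buffer header exists for the block --
 * which is exactly the case breadn() uses it for when deciding whether
 * a read-ahead block needs I/O.
 */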

/*
 * Now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen, but do when filesystems don't handle the size
		 * changes correctly.)  We are conservative on metadata and
		 * don't just extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
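		/*
		 * Setting b_blkno equal to b_lblkno marks the buffer as
		 * not yet bmapped; bdwrite() and the cluster scan in
		 * vfs_bio_awrite() both test for this equality to decide
		 * whether a disk address has been assigned.
		 */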
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
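			/*
			 * The old malloced contents are saved here and copied
			 * into the new page-backed storage (via the bcopy
			 * further down) once the pages are mapped.
			 */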
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
	doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
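						/*
						 * The page is resident and not busy; if we
						 * are about to pull it off the cache queue
						 * while free plus cache memory is low, nudge
						 * the pagedaemon to replenish the pools first.
						 */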
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
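
/*
 * In outline, biodone() marks the buffer B_DONE, counts down the
 * vnode's output count for writes, runs an optional b_iodone handler
 * (the B_CALL hook), fixes up the valid bits and busy counts of any
 * VMIO pages (restoring pages that were replaced by bogus_page), and
 * finally either releases the buffer (async) or wakes its waiter.
 */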

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary --
	 * so no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
    &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
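/*
 * vfs_unbusy_pages() undoes exactly what vfs_busy_pages() set up: it
 * drops the per-page busy counts and the object's paging_in_progress
 * count, and swaps any bogus_page entries back for the real pages.
 */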
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
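/*
 * Called from bdwrite(): marking the pages valid and clean here keeps
 * the pageout daemon from re-writing pages that the pending
 * delayed-write buffer already covers.
 */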
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}