vfs_bio.c revision 16027
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.91 1996/05/24 05:21:58 dyson Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;
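
/*
 * A worked example of the sizing above (illustrative only, assuming
 * nbuf = 1024 and PAGE_SIZE = 4096): maxbufspace = 2 * 1032 * 4096 =
 * 8454144 bytes, about 8MB; maxvmiobufspace is then two thirds of that
 * (about 5.4MB) and maxbufmallocspace one twentieth (about 413KB).
 */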

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
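
/*
 * A minimal usage sketch for the read interface (hypothetical caller,
 * not part of this file): read one logical block, inspect it, and
 * requeue it when done.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	bqrelse(bp);
 */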

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}
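
/*
 * bwrite() is synchronous unless B_ASYNC is set: the synchronous path
 * sleeps in biowait() and returns the I/O status, while the async path
 * returns 0 immediately and the buffer is released later from biodone().
 */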

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}
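
/*
 * Lifecycle of a delayed write, as implemented above: bdwrite() marks
 * the buffer B_DELWRI and requeues it via bqrelse(); the data reaches
 * disk later, when the update daemon's periodic sync runs or when
 * getnewbuf() pushes the buffer out through vfs_bio_awrite().
 */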

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
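
/*
 * bqrelse() below is the cheap variant of brelse() for buffers whose
 * contents remain valid: it skips the VMIO rundown and invalidation
 * checks entirely and simply requeues the buffer on the LOCKED or LRU
 * queue.
 */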

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if (m->flags & PG_WANTED) {
			m->flags &= ~PG_WANTED;
			wakeup(m);
		}
		vm_page_unwire(m);
		if (m->wire_count == 0 && (m->flags & PG_BUSY) == 0) {
			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) &&
					    (m->hold_count == 0) &&
					    (m->flags & PG_BUSY) == 0 &&
					    (m->busy == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if ((m->hold_count == 0) &&
			    ((m->flags & PG_BUSY) == 0) &&
			    (m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&	/* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
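
/*
 * The cluster scan above collects up to MAXPHYS / f_iosize logically and
 * physically contiguous dirty buffers.  For example, assuming MAXPHYS is
 * 64K and an 8K block filesystem, at most 8 buffers (one 64K transfer)
 * are written per call.
 */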


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 2;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}
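
/*
 * Reclaim order in getnewbuf(), for reference: an unused EMPTY header is
 * taken first (when under maxbufspace); otherwise the AGE queue and then
 * the LRU queue are scavenged.  A victim that is a delayed write is first
 * pushed out via vfs_bio_awrite() rather than being discarded dirty.
 */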

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}
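
/*
 * A worked example for the scans above, assuming 4K pages: for an 8K
 * buffer whose second page alone is dirty, both scans stop at page
 * index 1, so b_dirtyend is raised to 8192 (if it was smaller) and
 * b_dirtyoff is lowered to 4096 only if it previously lay beyond that
 * offset.
 */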

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}
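
/*
 * Callers can rely on the following contract: getblk() returns the
 * buffer B_BUSY (locked against other users), and B_CACHE is set only
 * when the contents are already valid, which is how bread() and
 * breadn() decide whether physical I/O is required.
 */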

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_TEMP);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_TEMP, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
			origbuf = NULL;
			origbufsize = 0;
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_TEMP);
			}
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
	doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->queue == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
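
/*
 * Summary of the two allocbuf() strategies above: non-VMIO buffers draw
 * anonymous kernel pages (or a malloc()ed chunk for a first allocation of
 * at most half a page), while VMIO buffers wire the vnode's own VM object
 * pages into the buffer map, clearing B_CACHE whenever any byte of the
 * requested range is not yet valid.
 */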

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}
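
/*
 * Page busy accounting, for reference: vfs_busy_pages() increments
 * m->busy and obj->paging_in_progress for each page of a non-cluster
 * buffer before VOP_STRATEGY(), and biodone() above (or
 * vfs_unbusy_pages() on an aborted I/O) decrements them again and wakes
 * any PG_WANTED sleepers.
 */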

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
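
/*
 * The sync interval is exported read-write through the sysctl tree, so
 * it can presumably be tuned from userland with something like
 * "sysctl -w kern.update=10" (name derived from the SYSCTL_PROC entry
 * above); writing it also wakes the daemon for an immediate sync pass.
 */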

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
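
/*
 * Note on the bogus_page substitution above: when a read targets a
 * buffer whose pages are partially valid (B_CACHE clear but m->valid
 * set), the valid pages are replaced in the buffer's mapping by
 * bogus_page so the device I/O cannot overwrite their contents;
 * brelse(), biodone() and vfs_unbusy_pages() look the real pages back
 * up and restore the mapping.
 */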
1635 */ 1636void 1637vfs_clean_pages(struct buf * bp) 1638{ 1639 int i; 1640 1641 if (bp->b_flags & B_VMIO) { 1642 vm_ooffset_t foff; 1643 int iocount = bp->b_bufsize; 1644 1645 if (bp->b_vp->v_type == VBLK) 1646 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno; 1647 else 1648 foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1649 1650 for (i = 0; i < bp->b_npages; i++) { 1651 vm_page_t m = bp->b_pages[i]; 1652 int resid = IDX_TO_OFF(m->pindex + 1) - foff; 1653 1654 if (resid > iocount) 1655 resid = iocount; 1656 if (resid > 0) { 1657 vm_page_set_validclean(m, 1658 ((vm_offset_t) foff & PAGE_MASK), resid); 1659 } 1660 foff += resid; 1661 iocount -= resid; 1662 } 1663 } 1664} 1665 1666void 1667vfs_bio_clrbuf(struct buf *bp) { 1668 int i; 1669 if( bp->b_flags & B_VMIO) { 1670 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) { 1671 int mask; 1672 mask = 0; 1673 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE) 1674 mask |= (1 << (i/DEV_BSIZE)); 1675 if( bp->b_pages[0]->valid != mask) { 1676 bzero(bp->b_data, bp->b_bufsize); 1677 } 1678 bp->b_pages[0]->valid = mask; 1679 bp->b_resid = 0; 1680 return; 1681 } 1682 for(i=0;i<bp->b_npages;i++) { 1683 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL) 1684 continue; 1685 if( bp->b_pages[i]->valid == 0) { 1686 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 1687 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE); 1688 } 1689 } else { 1690 int j; 1691 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) { 1692 if( (bp->b_pages[i]->valid & (1<<j)) == 0) 1693 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE); 1694 } 1695 } 1696 /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */ 1697 } 1698 bp->b_resid = 0; 1699 } else { 1700 clrbuf(bp); 1701 } 1702} 1703 1704/* 1705 * vm_hold_load_pages and vm_hold_unload pages get pages into 1706 * a buffers address space. The pages are anonymous and are 1707 * not associated with a file object. 1708 */ 1709void 1710vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1711{ 1712 vm_offset_t pg; 1713 vm_page_t p; 1714 int index; 1715 1716 to = round_page(to); 1717 from = round_page(from); 1718 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1719 1720 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1721 1722tryagain: 1723 1724 p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 1725 VM_ALLOC_NORMAL); 1726 if (!p) { 1727 VM_WAIT; 1728 goto tryagain; 1729 } 1730 vm_page_wire(p); 1731 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 1732 bp->b_pages[index] = p; 1733 PAGE_WAKEUP(p); 1734 } 1735 bp->b_npages = to >> PAGE_SHIFT; 1736} 1737 1738void 1739vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 1740{ 1741 vm_offset_t pg; 1742 vm_page_t p; 1743 int index; 1744 1745 from = round_page(from); 1746 to = round_page(to); 1747 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT; 1748 1749 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 1750 p = bp->b_pages[index]; 1751 if (p && (index < bp->b_npages)) { 1752 if (p->busy) { 1753 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 1754 bp->b_blkno, bp->b_lblkno); 1755 } 1756 bp->b_pages[index] = NULL; 1757 pmap_kremove(pg); 1758 vm_page_unwire(p); 1759 vm_page_free(p); 1760 } 1761 } 1762 bp->b_npages = from >> PAGE_SHIFT; 1763} 1764