vfs_bio.c revision 13292
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.81 1996/01/05 20:12:33 wollman Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers' base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;
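
/*
 * Note (derived from the code below): vfs_busy_pages() substitutes
 * bogus_page for already-valid pages of a buffer before starting a
 * read, so that the device scribbles on the bogus page instead of
 * clobbering valid cached data; brelse(), biodone(), and
 * vfs_unbusy_pages() later look the real pages back up and restore
 * the buffer's mappings with pmap_qenter().
 */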

static int bufspace, maxbufspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

#define BUF_MAXUSE 8

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * being 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
	    VM_ALLOC_NORMAL);
}

/*
 * Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
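
/*
 * Hypothetical usage sketch (illustrative only, not part of this file):
 * a typical filesystem read path obtains a block with bread() and must
 * release it with brelse() whether or not the read succeeded.  The
 * function name below is made up for illustration.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize,
    struct ucred *cred)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... examine bp->b_data here ... */
	brelse(bp);
	return (0);
}
#endif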

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty.)
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that they think the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer, the VFS layer will see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}
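
/*
 * Summary of the three write paths (derived from the code): bwrite()
 * starts the I/O and, unless B_ASYNC is set, sleeps in biowait() for
 * completion; bdwrite() only marks the buffer B_DELWRI and releases
 * it, deferring the actual write to a later sync or cluster write;
 * bawrite() below sets B_ASYNC and then takes the bwrite() path, so
 * the buffer is released by biodone() when the output completes.
 */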

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup(&needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE
	 * flag is used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;

			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex + 1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					if (m->flags & PG_WANTED) {
						m->flags &= ~PG_WANTED;
						wakeup(m);
					}
					if ((m->busy == 0) && ((m->flags & PG_BUSY) == 0)) {
						if (m->object->flags & OBJ_MIGHTBEDIRTY) {
							vm_page_test_dirty(m);
						}
						/*
						 * if page isn't valid, no sense in keeping it around
						 */
						if (m->valid == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						/*
						 * if page isn't dirty and hasn't been referenced by
						 * a process, then cache it
						 */
						} else if ((m->dirty & m->valid) == 0 &&
						    (m->flags & PG_REFERENCED) == 0 &&
						    !pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
							vm_page_cache(m);
						/*
						 * otherwise activate it
						 */
						} else if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
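
/*
 * Free-list selection in brelse(), in brief: buffers with no backing
 * memory go to QUEUE_EMPTY; buffers with junk contents (B_ERROR,
 * B_INVAL, B_NOCACHE, or B_RELBUF) go to the *head* of QUEUE_AGE for
 * quick reuse; B_LOCKED buffers park on QUEUE_LOCKED; B_AGE buffers go
 * to the tail of QUEUE_AGE; everything else, still potentially useful,
 * goes to the tail of QUEUE_LRU.
 */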

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
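
/*
 * Note (derived from the code): gbincore() does no interrupt blocking
 * of its own, so callers such as getblk() and vfs_bio_awrite() invoke
 * it at splbio(); incore() further below is the standalone variant
 * that raises splbio() itself around the same hash-chain search.
 */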

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&	/* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
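
/*
 * Worked example for the cluster scan above (illustrative figures):
 * with a MAXPHYS of 64K and an 8K filesystem block size, maxcl is 8,
 * so the loop probes logical blocks lblkno+1 through lblkno+7.  The
 * scan stops at the first neighbor that is missing, busy, not
 * delayed-write, not cluster-eligible, of a different size, or not
 * physically contiguous (b_blkno must advance by exactly
 * (i * size) >> DEV_BSHIFT device blocks).
 */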

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;
	int nbyteswritten = 0;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		--bp->b_usecount;
		TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
			goto start;
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 2;
	if (bufspace >= maxbufspace + nbyteswritten) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}
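
/*
 * Note on the return protocol above: getnewbuf() can return NULL
 * either because it slept waiting for a buffer or because it initiated
 * a delayed-write flush; callers (see getblk() and geteblk() below)
 * must be prepared to retry the allocation in that case.
 */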

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */
int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}
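
/*
 * Worked example for vfs_setdirty() (illustrative, assuming 4K pages):
 * for a 16K buffer spanning pages 0-3 where only page 2 was modified
 * through a mapping, the forward scan stops at i == 2 and the backward
 * scan also stops at i == 2, so the buffer's dirty range grows to at
 * least [8192, 12288) -- page granular, since the VM system only
 * tracks modification per page.
 */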

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen, but do when filesystems don't handle the size
		 * changes correctly.)  We are conservative on metadata and
		 * don't just extend the buffer but write and re-constitute it.
		 */
		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		/*
		 * make sure that all pages in the buffer are valid; if they
		 * aren't, clear the cache flag.
		 * ASSUMPTION:
		 *  if the buffer is greater than 1 page in size, it is assumed
		 *  that the buffer address starts on a page boundary...
		 */
		if (bp->b_flags & B_VMIO) {
			int szleft, i;

			szleft = size;
			for (i = 0; i < bp->b_npages; i++) {
				if (szleft > PAGE_SIZE) {
					if ((bp->b_pages[i]->valid & VM_PAGE_BITS_ALL) !=
					    VM_PAGE_BITS_ALL) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft -= PAGE_SIZE;
				} else {
					if (!vm_page_is_valid(bp->b_pages[i],
					    (((vm_offset_t) bp->b_data) & PAGE_MASK),
					    szleft)) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft = 0;
				}
			}
		}
		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
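
/*
 * Hypothetical usage sketch (illustrative only, not part of this
 * file): a write path typically pairs getblk() with bdwrite() or
 * bwrite(), letting getblk() supply either the cached contents
 * (B_CACHE) or a freshly constituted buffer.  The function name below
 * is made up for illustration.
 */
#if 0
static int
example_write_block(struct vnode *vp, daddr_t lbn, int bsize,
    const void *data)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	if (bp == NULL)			/* only possible with slpflag/slptimeo */
		return (EWOULDBLOCK);
	bcopy(data, bp->b_data, bsize);
	bdwrite(bp);			/* mark dirty; written on a later sync */
	return (0);
}
#endif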

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{
	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								PAGE_WAKEUP(bp->b_pages[j]);
							}
							VM_WAIT;
							goto doretry;
						}
						vm_page_activate(m);
						m->act_count = 0;
						m->valid = 0;
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						int j;

						for (j = bp->b_npages; j < pageindex; j++) {
							PAGE_WAKEUP(bp->b_pages[j]);
						}

						s = splbio();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				for (i = bp->b_npages; i < curbpnpages; i++) {
					m = bp->b_pages[i];
					m->bmapped++;
					PAGE_WAKEUP(m);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off & (PAGE_SIZE - 1);
			}
		}
	}
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
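
/*
 * Note on the VMIO grow path above: each wanted page is looked up in
 * (or allocated into) the vnode's VM object, held PG_BUSY during the
 * gather, and only released (PAGE_WAKEUP) once its bmapped count has
 * been bumped; if any page must be waited for, all pages gathered so
 * far are released and the scan restarts from doretry.
 */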

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * code checks for B_WANTED and will do the wakeup there if necessary --
	 * so no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);

	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
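
/*
 * Usage note (illustrative): the interval of the update daemon can be
 * tuned at run time through the sysctl declared above, e.g. with
 * something like "sysctl -w kern.update=10"; the handler wakes
 * vfs_update(), so a shorter interval takes effect immediately instead
 * of after the current sleep expires.
 */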

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * count is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
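
/*
 * Worked example for the small-buffer case in vfs_bio_clrbuf() above
 * (illustrative, assuming a DEV_BSIZE of 512): a 2K buffer covers four
 * 512-byte chunks of its single page, so the computed mask is 0x0f;
 * if the page's valid bits do not already match, the buffer is zeroed
 * and the first four valid bits are then set.
 */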

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;

	to = round_page(to);

	for (pg = round_page(from); pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[(pg - trunc_page(bp->b_data)) >> PAGE_SHIFT] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		bp->b_pages[index] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}