vfs_bio.c revision 13211
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.79 1995/12/14 08:32:09 phk Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

#define BUF_MAXUSE 8

/*
 * Initialize buffer headers and related structures.
 */
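/*
 * Note: bufinit() is run once at system startup, before any filesystem
 * I/O takes place; nbuf and buffer_map are set up beforehand by the
 * machine-dependent startup code.
 */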
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated on the assumption that all
 * filesystem blocks are 8K.  If you happen to use a 16K filesystem,
 * the size of the buffer cache is still the same as it would be for
 * 8K filesystems.  This keeps the size of the buffer cache "in check"
 * for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
	    VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
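/*
 * Illustrative call from a filesystem read path (the names here are
 * hypothetical, not from this file):
 *
 *	error = breadn(vp, lbn, bsize, &nextlbn, &bsize, 1, NOCRED, &bp);
 *
 * As with bread(), the caller always gets a buffer back in *bpp and
 * must brelse() or write it, even when an error is returned.
 */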
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * Doing the bmap here keeps the system from needing to do it
	 * later, perhaps when the system is attempting to do a sync.
	 * Since it is likely that the indirect block -- or whatever
	 * other data structure the filesystem needs -- is still in
	 * memory now, doing the translation is cheap.  Note also that
	 * if the pageout daemon is requesting a sync, there might not
	 * be enough memory to do the bmap then, so this is important
	 * to do now.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that they consider the pages to have been
	 * "cleaned".  Note that since the pages are in a delayed-write
	 * buffer, the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup(&needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE
	 * flag is used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;

			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex + 1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					if (m->flags & PG_WANTED) {
						m->flags &= ~PG_WANTED;
						wakeup(m);
					}
					if ((m->busy == 0) && ((m->flags & PG_BUSY) == 0)) {
						if (m->object->flags & OBJ_MIGHTBEDIRTY) {
							vm_page_test_dirty(m);
						}
						/*
						 * if page isn't valid, no sense in keeping it around
						 */
						if (m->valid == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						/*
						 * if page isn't dirty and hasn't been referenced by
						 * a process, then cache it
						 */
						} else if ((m->dirty & m->valid) == 0 &&
						    (m->flags & PG_REFERENCED) == 0 &&
						    !pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
							vm_page_cache(m);
						/*
						 * otherwise activate it
						 */
						} else if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * This routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
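/*
 * Example of the arithmetic below: with an 8K filesystem block size
 * and a 64K MAXPHYS (illustrative values only), maxcl is 8, so at
 * most 8 contiguous dirty buffers can be gathered into a single
 * cluster write.
 */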
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&	/* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;
	int nbyteswritten = 0;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep file I/O from hogging metadata I/O.  This is desirable
	 * because file data is cached in the VM/Buffer cache even if a
	 * buffer is freed.
	 */
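	/*
	 * Scan order: QUEUE_AGE (stale contents) is preferred over
	 * QUEUE_LRU (valid, potentially reusable contents); QUEUE_EMPTY
	 * was already tried above when buffer space was available.
	 */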
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		--bp->b_usecount;
		TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
			goto start;
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 2;
	if (bufspace >= maxbufspace + nbyteswritten) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */
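/*
 * Note: inmem() is advisory; it is used by breadn() above to skip
 * read-ahead for blocks whose data is already resident, either in the
 * buffer cache or in pages of the backing VM object.
 */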
int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen, but they do when filesystems don't handle the size
		 * changes correctly.)  We are conservative on metadata and
		 * don't just extend the buffer but write and re-constitute it.
		 */
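		/*
		 * At this point the buffer has been found in the cache,
		 * marked B_BUSY, and removed from its free list; B_CACHE
		 * set above tells the caller that no I/O is needed.
		 */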
		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		/*
		 * make sure that all pages in the buffer are valid; if they
		 * aren't, clear the cache flag.
		 * ASSUMPTION:
		 *  if the buffer is greater than 1 page in size, it is assumed
		 *  that the buffer address starts on a page boundary...
		 */
		if (bp->b_flags & B_VMIO) {
			int szleft, i;

			szleft = size;
			for (i = 0; i < bp->b_npages; i++) {
				if (szleft > PAGE_SIZE) {
					if ((bp->b_pages[i]->valid & VM_PAGE_BITS_ALL) !=
					    VM_PAGE_BITS_ALL) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft -= PAGE_SIZE;
				} else {
					if (!vm_page_is_valid(bp->b_pages[i],
					    (((vm_offset_t) bp->b_data) & PAGE_MASK),
					    szleft)) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft = 0;
				}
			}
		}
		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
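/*
 * Note: allocbuf() must be called with the buffer locked (B_BUSY) --
 * it panics otherwise -- and it keeps the global bufspace accounting
 * in step with b_bufsize.
 */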
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
	doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								PAGE_WAKEUP(bp->b_pages[j]);
							}
							VM_WAIT;
							goto doretry;
						}
						vm_page_activate(m);
						m->act_count = 0;
						m->valid = 0;
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						int j;

						for (j = bp->b_npages; j < pageindex; j++) {
							PAGE_WAKEUP(bp->b_pages[j]);
						}

						s = splbio();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				for (i = bp->b_npages; i < curbpnpages; i++) {
					m = bp->b_pages[i];
					m->bmapped++;
					PAGE_WAKEUP(m);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off & (PAGE_SIZE - 1);
			}
		}
	}
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
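/*
 * Note: if B_CALL is set, the b_iodone function is invoked exactly
 * once and biodone() returns immediately afterwards -- the callee is
 * then responsible for the buffer.
 */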
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
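			/*
			 * m->busy was incremented by vfs_busy_pages() before
			 * the strategy call; the decrement below must balance
			 * it, so a zero count here means a missing busy.
			 */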
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  brelse()
	 * checks for B_WANTED and will do the wakeup there if necessary,
	 * so there is no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);

	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
    &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also, the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
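/*
 * Note: vfs_busy_pages() is called (e.g. from bread() and bwrite()
 * above) just before VOP_STRATEGY; each page's busy count and the
 * object's paging_in_progress are dropped again by biodone() on
 * completion, or by vfs_unbusy_pages() when the I/O is not completed.
 */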
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
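/*
 * Note: these two routines serve only non-VMIO buffers; allocbuf()
 * above calls vm_hold_load_pages() to grow such a buffer and
 * vm_hold_free_pages() to shrink it, wiring kernel pages directly
 * into the buffer's kva.
 */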
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) >> PAGE_SHIFT] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		int index = ((caddr_t) pg - bp->b_data) >> PAGE_SHIFT;

		p = bp->b_pages[index];
		bp->b_pages[index] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}