vfs_bio.c revision 14319
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.85 1996/03/02 03:45:04 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;
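/*
 * Buffer cache space accounting: bufspace/maxbufspace account for all
 * buffer KVA use, vmiospace/maxvmiobufspace account for buffers backed
 * by VM objects (B_VMIO), and bufmallocspace/maxbufmallocspace account
 * for small buffers backed by malloced memory (B_MALLOC).
 */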
static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * being 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}
/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}
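/*
 * Default VOP_BWRITE implementation: hand the buffer to bwrite().
 */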
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough
	 * memory to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}
	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE
	 * flag is used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer onto the appropriate queue, but (unlike brelse)
 * without tearing down its VM backing; the contents remain valid.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
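/*
 * Release the VM resources backing a VMIO buffer: unwire its pages,
 * caching or freeing them as appropriate, unmap them from the buffer's
 * KVA, and dissociate the buffer from its vnode.
 */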
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if (m->flags & PG_WANTED) {
			m->flags &= ~PG_WANTED;
			wakeup(m);
		}
		vm_page_unwire(m);
		if (m->wire_count == 0) {
			if (m->valid) {
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min))
					vm_page_cache(m);
			} else if ((m->hold_count == 0) &&
			    ((m->flags & PG_BUSY) == 0) &&
			    (m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.  This is the
 * internal version of incore; the caller must already be at splbio.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			/*
			 * Make sure that the buffer is flagged as not being
			 * on a queue.
			 */
			bp->b_qindex = QUEUE_NONE;
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 2;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size
		 * changes correctly.)  We are conservative on metadata and
		 * don't just extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}
/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_TEMP);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_TEMP, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
			origbuf = NULL;
			origbufsize = 0;
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_TEMP);
			}
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->queue == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & (PAGE_SIZE - 1);
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do
			 * this here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O
			 * methods, this is the most common error that pops
			 * up.  if you see this, you have not set the page
			 * busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}
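/*
 * Return the number of buffers currently on the locked queue.
 */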
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
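/*
 * Zero any portions of the buffer that the underlying pages do not
 * already hold valid data for, using the per-page valid bits to skip
 * regions that are already valid, then mark the pages fully valid.
 */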
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;
	int remapbuffer = 0;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
	if (remapbuffer)
		pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}

/*
 * vm_hold_load_pages and vm_hold_free_pages move pages into and out
 * of a buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}