vfs_bio.c revision 7878
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.40 1995/04/16 05:11:14 davidg Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;
/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * this will change later!!!
 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
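/*
 * Example (illustrative sketch, not part of the original code): a typical
 * filesystem read path drives the interface above as follows; "fs_bsize",
 * "boff", "dst" and "len" are hypothetical.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);		release the buffer even on error
 *		return (error);
 *	}
 *	bcopy(bp->b_data + boff, dst, len);
 *	brelse(bp);			drop B_BUSY; contents stay cached
 *
 * A second bread() of the same block then finds B_CACHE set in getblk()
 * and returns without doing any physical I/O.
 */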
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc && curproc->p_stats)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC | B_DELWRI)) == (B_ASYNC | B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc && curproc->p_stats)	/* count block I/O */
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
	}
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}
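/*
 * Example (illustrative sketch, not part of the original code): the three
 * write entry points above trade durability against latency.  A caller
 * holding a busy, modified buffer picks one of:
 *
 *	error = bwrite(bp);	synchronous -- sleeps in biowait() until
 *				the write completes, then returns status
 *
 *	bdwrite(bp);		delayed -- just marks B_DELWRI and releases;
 *				the update daemon or a cluster write flushes
 *				it later (metadata-friendly)
 *
 *	bawrite(bp);		asynchronous -- starts the write immediately
 *				but does not wait; biodone()/brelse() clean
 *				up when the I/O finishes
 */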
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}
	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
				/* the VM object backing this buffer */
				obj = (vm_object_t) bp->b_vp->v_vmdata;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("brelse: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
					if (m->valid == 0)
						vm_page_protect(m, VM_PROT_NONE);
				}
			}
			foff += resid;
			iototal -= resid;
		}

		if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					vm_page_test_dirty(m);
					if (m->flags & PG_WANTED) {
						wakeup((caddr_t) m);
						m->flags &= ~PG_WANTED;
					}
					if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if ((m->flags & PG_ACTIVE) == 0) {
						vm_page_activate(m);
						m->act_count = 0;
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
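/*
 * Summary (added for reference, not part of the original code): where
 * brelse() leaves a buffer determines how soon getnewbuf() recycles it:
 *
 *	b_bufsize == 0			QUEUE_EMPTY	header only
 *	B_ERROR/B_INVAL/B_NOCACHE/
 *	  B_RELBUF			QUEUE_AGE head	junk, reused first
 *	B_LOCKED			QUEUE_LOCKED	not recycled
 *	B_AGE				QUEUE_AGE tail	stale but valid
 *	otherwise			QUEUE_LRU tail	valid, kept longest
 */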
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;

		for (i = 1; i < MAXPHYS / size; i++) {
			/*
			 * a cluster candidate must be delayed-write and
			 * clusterable, and must be neither busy nor invalid
			 */
			if ((bpa = incore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
				(B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	(void) VOP_BWRITE(bp);
	splx(s);
}
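/*
 * Example (simplified sketch, not part of the original code): getnewbuf()
 * below uses vfs_bio_awrite() to push out a delayed-write buffer it would
 * otherwise have to recycle, giving the cluster code a chance to combine
 * neighboring dirty blocks into one transfer:
 *
 *	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
 *		vfs_bio_awrite(bp);	may write ncl blocks, not just one
 *		goto start;		buffer is busy now, pick another
 *	}
 */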
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}
	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}
	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0) {
			splx(s);
			return (bp);
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);

	return (0);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = (vm_object_t) vp->v_vmdata;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}
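/*
 * Example (illustrative sketch, not part of the original code): a caller
 * that wants to start I/O only for blocks that are completely absent
 * tests inmem() first, exactly as breadn() above does for its read-ahead
 * list.  inmem() is the stronger predicate, since it also finds data that
 * lives only in the VM object and no longer has a buffer header:
 *
 *	if (!inmem(vp, rablkno)) {
 *		rabp = getblk(vp, rablkno, rabsize, 0, 0);
 *		... start an async read on rabp if !B_CACHE ...
 *	}
 */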
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = incore(vp, blkno)) != NULL) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistencies
		 */
		if (bp->b_bcount != size) {
#if defined(VFS_BIO_DEBUG)
			printf("getblk: invalid buffer size: %ld\n", bp->b_bcount);
#endif
			bp->b_flags |= B_NOCACHE;
			(void) VOP_BWRITE(bp);
			goto loop;
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = (vm_object_t) vp->v_vmdata) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}
		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 * VOP_ISLOCKED would be much better but is also much
		 * slower.
		 */
		if ((vp->v_type == VBLK) && incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}
		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
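/*
 * Example (illustrative sketch, not part of the original code): a
 * filesystem allocating a brand-new block does not want to read garbage
 * from disk, so it constitutes the buffer directly and zeros the invalid
 * portions instead of calling bread(); "fs_bsize" is hypothetical, and
 * vfs_bio_clrbuf() is defined later in this file:
 *
 *	bp = getblk(vp, lblkno, fs_bsize, 0, 0);
 *	vfs_bio_clrbuf(bp);	zero whatever is not already valid
 *	... fill in the new data ...
 *	bdwrite(bp);
 */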
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize;
	int i;

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		newbsize = round_page(size);

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
			bufspace -= (bp->b_bufsize - newbsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			bufspace += (newbsize - bp->b_bufsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
				bufspace -= (bp->b_bufsize - newbsize);
			}
		} else {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = (vm_object_t) vp->v_vmdata;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								PAGE_WAKEUP(bp->b_pages[j]);
							}
							VM_WAIT;
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						vm_page_activate(m);
						m->act_count = 0;
						m->valid = 0;
					} else if (m->flags & PG_BUSY) {
						int j;

						for (j = bp->b_npages; j < pageindex; j++) {
							PAGE_WAKEUP(bp->b_pages[j]);
						}

						s = splbio();
						m->flags |= PG_WANTED;
						tsleep(m, PRIBIO, "pgtblk", 0);
						splx(s);

						curbpnpages = bp->b_npages;
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (bsize >= PAGE_SIZE) {
					for (i = bp->b_npages; i < curbpnpages; i++) {
						m = bp->b_pages[i];
						if (m->valid == 0) {
							bp->b_flags &= ~B_CACHE;
						}
						m->bmapped++;
						PAGE_WAKEUP(m);
					}
				} else {
					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
						bp->b_flags &= ~B_CACHE;
					bp->b_pages[0]->bmapped++;
					PAGE_WAKEUP(bp->b_pages[0]);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off % PAGE_SIZE;
			}
			bufspace += (newbsize - bp->b_bufsize);
		}
	}
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
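/*
 * Example (illustrative sketch, not part of the original code): allocbuf()
 * is how getblk() gives a recycled header its storage, and how a caller
 * can grow a buffer in place without losing the bytes already cached;
 * "osize" and "nsize" are hypothetical:
 *
 *	bp = getblk(vp, lblkno, osize, 0, 0);	buffer holds osize bytes
 *	allocbuf(bp, nsize);			grow to nsize (> osize);
 *						existing contents preserved
 *
 * For VMIO buffers the new pages come from the vnode's VM object, so
 * allocbuf() may find them already valid and leave B_CACHE set.
 */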
/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
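/*
 * Example (illustrative sketch, not part of the original code): a caller
 * that cannot sleep arranges a completion callback instead of biowait();
 * biodone() below consumes B_CALL and invokes b_iodone, usually at
 * interrupt time.  "my_iodone" is hypothetical:
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = my_iodone;
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);
 *	return;			my_iodone(bp) runs on completion
 */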
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = (vm_object_t) vp->v_vmdata;
		if (!obj) {
			splx(s);
			return;
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			if (!bogusflag && resid > 0) {
				vm_page_set_valid(m, foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m, foff & (PAGE_SIZE - 1), resid);
			}
			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, off: %d, foff: %d, resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %d, lblkno: %d\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize, bp->b_lblkno);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}
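/*
 * Example (illustrative sketch, not part of the original code): as the
 * comment at vfs_update_wakeup's declaration suggests, other kernel code
 * could force a sync before the interval expires by waking the daemon
 * out of its tsleep():
 *
 *	vfs_update_wakeup = 1;
 *	wakeup((caddr_t) &vfs_update_wakeup);
 */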
/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = (vm_object_t) vp->v_vmdata;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				m->flags &= ~PG_REFERENCED;
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
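/*
 * Example (illustrative sketch, not part of the original code): the two
 * routines above bracket a transfer, as bread() and bwrite() demonstrate:
 *
 *	vfs_busy_pages(bp, 0);		pages busied, paging_in_progress++
 *	VOP_STRATEGY(bp);		biodone() unbusies on completion
 *
 * If the I/O can not be issued at all, biodone() will never run for it,
 * so the caller backs the accounting out by hand instead:
 *
 *	vfs_unbusy_pages(bp);
 *	brelse(bp);
 */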
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean; the delayed write guarantees that the data will reach
 * the disk without further VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff =
		    bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Zero the not-yet-valid portions of a buffer: any DEV_BSIZE piece of a
 * VMIO buffer whose backing page is not already valid gets cleared, then
 * the pages are marked fully valid.
 */
void
vfs_bio_clrbuf(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int j;

			if (bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
				for (j = 0; j < bp->b_bufsize / DEV_BSIZE; j++) {
					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}

void
bufstats()
{
}