/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.43 1995/04/30 05:09:13 davidg Exp $
 */

/*
 * This file implements a new buffer I/O scheme providing a coherent
 * VM object and buffer cache.  Pains have been taken to make sure that
 * the performance degradation usually associated with such schemes is
 * not realized here.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;
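/*
 * Illustrative note (added commentary, not from the original source):
 * bufspace counts the bytes of storage currently backing buffer headers;
 * allocbuf() and brelse() adjust it as buffers grow and shrink.  bufinit()
 * below sets maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE, so an assumed
 * configuration of nbuf = 1024 with 4K pages would allow
 * 2 * 1032 * 4096 = 8454144 bytes, roughly 8 MB of buffer storage.
 */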
/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * this will change later!!!
 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
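/*
 * Usage sketch (illustrative, not from the original source): a typical
 * filesystem read path looks roughly like the following, assuming a
 * locked vnode vp and a hypothetical logical block number lbn:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, vp->v_mount->mnt_stat.f_iosize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of bp->b_data ...
 *	brelse(bp);
 *
 * Note that bread() returns a buffer even on error, so the caller is
 * still responsible for releasing it.
 */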
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC | B_DELWRI)) == (B_ASYNC | B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
	}
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}
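/*
 * Usage sketch (illustrative, not from the original source): the three
 * write entry points above trade durability for latency.  Assuming a
 * busy buffer bp obtained from getblk() or bread():
 *
 *	bwrite(bp);	// synchronous: waits for the I/O, returns its error
 *	bdwrite(bp);	// delayed: marks B_DELWRI and releases; written later
 *	bawrite(bp);	// asynchronous: starts I/O now, released by biodone()
 *
 * Metadata that must not be lost is usually pushed with bwrite(), while
 * ordinary file data typically goes through bdwrite() or bawrite().
 */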
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}

	/*
	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("brelse: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
					if (m->valid == 0)
						vm_page_protect(m, VM_PROT_NONE);
				}
			}
			foff += resid;
			iototal -= resid;
		}

		if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					vm_page_test_dirty(m);
					if (m->flags & PG_WANTED) {
						wakeup((caddr_t) m);
						m->flags &= ~PG_WANTED;
					}
					if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if ((m->flags & PG_ACTIVE) == 0) {
						vm_page_activate(m);
						m->act_count = 0;
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
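/*
 * Summary of the free-queue placement above (descriptive comment added
 * for clarity; the behavior is exactly what the enqueue code implements):
 *
 *	QUEUE_EMPTY	headers with no backing storage (b_bufsize == 0)
 *	QUEUE_AGE	junk contents, inserted at the head for fast reuse,
 *			or valid-but-stale (B_AGE) contents, at the tail
 *	QUEUE_LOCKED	buffers held by B_LOCKED, not reclaimable
 *	QUEUE_LRU	valid, likely-reusable contents
 *
 * getnewbuf() below reclaims from QUEUE_EMPTY first, then QUEUE_AGE,
 * then QUEUE_LRU.
 */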
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;

		for (i = 1; i < MAXPHYS / size; i++) {
			if ((bpa = incore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
				(B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	(void) VOP_BWRITE(bp);
	splx(s);
}
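/*
 * Worked example for the contiguity test above (illustrative, with
 * assumed sizes): with an 8K filesystem block (f_iosize == 8192) and
 * 512-byte device sectors (DEV_BSIZE == 512), each logical block spans
 * (i * size) / DEV_BSIZE = i * 16 sectors, so block lblkno + i extends
 * the cluster only if its b_blkno equals bp->b_blkno + i * 16.  A buffer
 * whose b_blkno still equals its b_lblkno has never been through
 * VOP_BMAP() and cannot be shown contiguous, so it ends the scan.
 */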
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0) {
			splx(s);
			return (bp);
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);

	return (0);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = (vm_object_t) vp->v_vmdata;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}
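/*
 * Worked example for inmem() (illustrative, with assumed sizes): with
 * f_iosize == 8192 and PAGE_SIZE == 4096, tinc is clamped to 4096 and
 * the loop probes two pages, at offsets off and off + 4096, requiring
 * each to be fully valid over its tinc span.  With f_iosize == 1024,
 * tinc becomes 1024 and the single probe checks only the 1K sub-range
 * of the page that the block occupies.
 */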
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = incore(vp, blkno)) != NULL) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistencies
		 */
		if (bp->b_bcount != size) {
#if defined(VFS_BIO_DEBUG)
			printf("getblk: invalid buffer size: %ld\n", bp->b_bcount);
#endif
			bp->b_flags |= B_NOCACHE;
			(void) VOP_BWRITE(bp);
			goto loop;
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = (vm_object_t) vp->v_vmdata) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo)
				return NULL;
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 * VOP_ISLOCKED would be much better but is also much
		 * slower.
		 */
		if ((vp->v_type == VBLK) && incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
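/*
 * Usage sketch (illustrative, not from the original source): getblk()
 * is the right entry point when the caller intends to overwrite the
 * whole block and does not care about its previous contents, e.g.:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0)
 *		vfs_bio_clrbuf(bp);	// zero-fill instead of reading
 *	... fill bp->b_data ...
 *	bwrite(bp);
 *
 * lbn and bsize are assumed values here; bread() should be used instead
 * whenever any existing data in the block must be preserved.
 */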
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize;
	int i;

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		newbsize = round_page(size);

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
			bufspace -= (bp->b_bufsize - newbsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			bufspace += (newbsize - bp->b_bufsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
				bufspace -= (bp->b_bufsize - newbsize);
			}
		} else {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = (vm_object_t) vp->v_vmdata;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								PAGE_WAKEUP(bp->b_pages[j]);
							}
							VM_WAIT;
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						vm_page_activate(m);
						m->act_count = 0;
						m->valid = 0;
					} else if (m->flags & PG_BUSY) {
						int j;

						for (j = bp->b_npages; j < pageindex; j++) {
							PAGE_WAKEUP(bp->b_pages[j]);
						}

						s = splbio();
						m->flags |= PG_WANTED;
						tsleep(m, PRIBIO, "pgtblk", 0);
						splx(s);

						curbpnpages = bp->b_npages;
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (bsize >= PAGE_SIZE) {
					for (i = bp->b_npages; i < curbpnpages; i++) {
						m = bp->b_pages[i];
						if (m->valid == 0) {
							bp->b_flags &= ~B_CACHE;
						}
						m->bmapped++;
						PAGE_WAKEUP(m);
					}
				} else {
					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
						bp->b_flags &= ~B_CACHE;
					bp->b_pages[0]->bmapped++;
					PAGE_WAKEUP(bp->b_pages[0]);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off % PAGE_SIZE;
			}
			bufspace += (newbsize - bp->b_bufsize);
		}
	}
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
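/*
 * Usage sketch (illustrative, not from the original source): the
 * synchronous I/O pattern used throughout this file pairs VOP_STRATEGY()
 * with biowait(), e.g. for a read:
 *
 *	bp->b_flags |= B_READ;
 *	bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 *
 * For async I/O the caller sets B_ASYNC instead, skips biowait(), and
 * biodone() below releases the buffer on completion.
 */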
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = (vm_object_t) vp->v_vmdata;
		if (!obj) {
			splx(s);
			return;
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			if (!bogusflag && resid > 0) {
				vm_page_set_valid(m, foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m, foff & (PAGE_SIZE - 1), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "off: %ld, foff: %ld, "
				    "resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %ld, lblkno: %ld\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize,
				    bp->b_lblkno);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary --
	 * so no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	splx(s);
}
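/*
 * Usage sketch (illustrative, not from the original source): a caller
 * that wants completion-time processing instead of sleeping in biowait()
 * can use the B_CALL hook above.  Assuming a hypothetical handler
 * my_iodone():
 *
 *	bp->b_iodone = my_iodone;
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	VOP_STRATEGY(bp);
 *
 * biodone() then invokes my_iodone(bp) at interrupt level and returns
 * immediately, so the handler must not block and is responsible for
 * eventually releasing the buffer itself.
 */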
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = (vm_object_t) vp->v_vmdata;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				m->flags &= ~PG_REFERENCED;
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
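/*
 * Protocol sketch (illustrative, not from the original source): the
 * busy/unbusy routines above bracket every VMIO transfer.  A strategy
 * caller does, roughly:
 *
 *	vfs_busy_pages(bp, is_write);	// pages busied, pip count raised
 *	VOP_STRATEGY(bp);
 *	...
 *	biodone(bp);			// normal path: unbusies the pages
 *
 * If the I/O is aborted before biodone() can run, vfs_unbusy_pages(bp)
 * must be called instead so the page busy counts and the object's
 * paging_in_progress count stay balanced.
 */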
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is in case of the unlikely circumstance that
 * a buffer has to be destroyed before it is flushed.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff =
		    bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int j;

			if (bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
				for (j = 0; j < bp->b_bufsize / DEV_BSIZE; j++) {
					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages() and vm_hold_free_pages() get pages into and out
 * of a buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}
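/*
 * Usage sketch (illustrative, not from the original source): these two
 * routines are the non-VMIO storage engine for allocbuf().  Growing a
 * buffer from 4K to 8K of anonymous memory amounts to:
 *
 *	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + 4096,
 *	    (vm_offset_t) bp->b_data + 8192);
 *
 * and the matching shrink later calls vm_hold_free_pages() over the same
 * range.  The page index arithmetic works because bufinit() gave every
 * buffer a fixed MAXBSIZE slice of KVA starting at buffers_kva.
 */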