vfs_bio.c revision 6884
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.33 1995/03/03 22:13:00 davidg Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;
int nvmio, nlru;

extern vm_map_t buffer_map, io_map, kernel_map, pager_map;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_dirty_pages(struct buf * bp);
void vfs_busy_pages(struct buf *, int clear_modify);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;
/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
	/*
	 * this will change later!!!
	 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_LRU)
			--nlru;
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
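/*
 * Usage sketch (illustrative, not part of the original interface
 * documentation): a typical filesystem read path calls bread() as
 * follows, where "lbn" and "bsize" are placeholders for a logical
 * block number and block size the caller has already computed:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use the data at bp->b_data ...
 *	brelse(bp);
 *
 * bread() always returns with the buffer busy; the caller must release
 * it with brelse(), or bdwrite()/bwrite() if it modified the contents.
 */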
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc && curproc->p_stats)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if (oldflags & B_ASYNC) {
		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
	}
	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(struct vop_bwrite_args * ap)
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~B_READ;
	vfs_dirty_pages(bp);
	if ((bp->b_flags & B_DELWRI) == 0) {
		if (curproc)
			++curproc->p_stats->p_ru.ru_oublock;
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	/*
	 * if the logical block has not yet been translated to a device
	 * block, do the translation now so a later flush need not do it.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
	}
	brelse(bp);
}
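/*
 * Summary of the write interfaces (an illustrative note; bawrite() is
 * defined below):
 *
 *	bwrite(bp)	synchronous: starts the write, then sleeps in
 *			biowait() until it completes.
 *	bdwrite(bp)	delayed: marks the buffer B_DELWRI and releases it;
 *			the update daemon or a later flush writes it out.
 *	bawrite(bp)	asynchronous: starts the write but does not wait;
 *			the buffer is released from biodone().
 */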
/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	struct vnode *vp;

	vp = bp->b_vp;
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
	/*
	 * this code limits the number of outstanding writes to a single
	 * file.  this keeps one file from overwhelming the buffer cache
	 * with writes, thereby allowing other files to be operated upon.
	 */
	if (vp->v_numoutput > (nbuf / 2)) {
		int s = splbio();

		while (vp->v_numoutput > (nbuf / 4)) {
			vp->v_flag |= VBWAIT;
			tsleep((caddr_t) &vp->v_numoutput, PRIBIO, "bawnmo", 0);
		}
		splx(s);
	}
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}
	/*
	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				panic("brelse: bogus page found");
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
				} else if ((bp->b_flags & B_DELWRI) == 0) {
					vm_page_set_clean(m, foff, resid);
					vm_page_set_valid(m, foff, resid);
				}
			} else {
				vm_page_test_dirty(m);
			}
			foff += resid;
			iototal -= resid;
		}

		if (bp->b_flags & B_INVAL) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					PAGE_WAKEUP(m);
					if (m->valid == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					} else if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if ((m->flags & PG_ACTIVE) == 0) {
						vm_page_activate(m);
						m->act_count = 0;
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
			--nvmio;
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		if (bp->b_flags & B_VMIO)
			bp->b_qindex = QUEUE_VMIO;
		else {
			bp->b_qindex = QUEUE_LRU;
			++nlru;
		}
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE);
	splx(s);
}
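/*
 * Free list placement summary (an illustrative note, derived from the
 * enqueue logic in brelse() above; getnewbuf() below reclaims from
 * these queues):
 *
 *	QUEUE_EMPTY	headers with no memory attached
 *	QUEUE_AGE	buffers with invalid or stale contents, reclaimed first
 *	QUEUE_LOCKED	B_LOCKED buffers, never reclaimed here
 *	QUEUE_VMIO	valid VMIO buffers (data also lives in a VM object)
 *	QUEUE_LRU	valid non-VMIO buffers, kept in LRU order
 */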
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;

		/*
		 * count the contiguous, clusterable delayed-write buffers
		 * that follow this one
		 */
		for (i = 1; i < MAXPHYS / size; i++) {
			if ((bpa = incore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
				(B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	bwrite(bp);
	splx(s);
}

/*
 * Find a buffer header which is available for use.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first) != NULL) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first) != NULL) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((nvmio > nbuf - minbuf) &&
	    ((bp = bufqueues[QUEUE_VMIO].tqh_first) != NULL)) {
		if (bp->b_qindex != QUEUE_VMIO)
			panic("getnewbuf: inconsistent VMIO queue");
	} else if ((nlru > nbuf - minbuf) &&
	    ((bp = bufqueues[QUEUE_LRU].tqh_first) != NULL)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		if (doingvmio) {
			if ((bp = bufqueues[QUEUE_VMIO].tqh_first) != NULL) {
				if (bp->b_qindex != QUEUE_VMIO)
					panic("getnewbuf: inconsistent VMIO queue");
			} else if ((bp = bufqueues[QUEUE_LRU].tqh_first) != NULL) {
				if (bp->b_qindex != QUEUE_LRU)
					panic("getnewbuf: inconsistent LRU queue");
			}
		} else {
			if ((bp = bufqueues[QUEUE_LRU].tqh_first) != NULL) {
				if (bp->b_qindex != QUEUE_LRU)
					panic("getnewbuf: inconsistent LRU queue");
			} else if ((bp = bufqueues[QUEUE_VMIO].tqh_first) != NULL) {
				if (bp->b_qindex != QUEUE_VMIO)
					panic("getnewbuf: inconsistent VMIO queue");
			}
		}
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}
	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_INVAL | B_BUSY;
		brelse(bp);
		bremfree(bp);
	}
	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0) {
			splx(s);
			return (bp);
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);

	return (0);
}
/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */
int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = (vm_object_t) vp->v_vmdata;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_cache_min)
		pagedaemon_wakeup();

	if ((bp = incore(vp, blkno)) != NULL) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (curproc == pageproc) {
				bp->b_flags |= B_PDWANTED;
				wakeup((caddr_t) &cnt.v_free_count);
			}
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistencies
		 */
		if (bp->b_bcount != size) {
#if defined(VFS_BIO_DEBUG)
			printf("getblk: invalid buffer size: %ld\n", bp->b_bcount);
#endif
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = (vm_object_t) vp->v_vmdata) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo)
				return NULL;
			goto loop;
		}
		/*
		 * It is possible that another buffer has been constituted
		 * during the time that getnewbuf is blocked.  This checks
		 * for this possibility, and handles it.
		 */
		if (incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}
		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
			++nvmio;
		} else {
			if (bp->b_flags & B_VMIO)
				--nvmio;
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		if (!allocbuf(bp, size, 1)) {
			s = splbio();
			goto loop;
		}
		return (bp);
	}
}
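/*
 * Usage note (illustrative): getblk() returns a busy buffer with B_CACHE
 * set when the contents are already valid; callers that need the data
 * follow the same pattern bread() uses above ("lbn" and "bsize" are
 * placeholder names):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags |= B_READ;
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(bp);
 *		error = biowait(bp);
 *	}
 */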
/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size, 0);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size, int vmio)
{
	int s;
	int newbsize;
	int i;

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		newbsize = round_page(size);

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
			bufspace -= (bp->b_bufsize - newbsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			bufspace += (newbsize - bp->b_bufsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						PAGE_WAKEUP(m);
						if (m->valid == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						}
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
				bufspace -= (bp->b_bufsize - newbsize);
			}
		} else {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = (vm_object_t) vp->v_vmdata;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
doretry:
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {
						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (vmio && (bp->b_flags & B_PDWANTED)) {
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						m->valid = 0;
						vm_page_activate(m);
						m->act_count = 0;
					} else if ((m->valid == 0) || (m->flags & PG_BUSY)) {
						int j;
						int bufferdestroyed = 0;

						for (j = bp->b_npages; j < pageindex; j++) {
							vm_page_t mt = bp->b_pages[j];

							PAGE_WAKEUP(mt);
							if (mt->valid == 0 && mt->bmapped == 0) {
								vm_page_free(mt);
							}
						}
						if (vmio && (bp->b_flags & B_PDWANTED)) {
							bp->b_flags |= B_INVAL;
							brelse(bp);
							VM_WAIT;
							bufferdestroyed = 1;
						}
						s = splbio();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PRIBIO, "pgtblk", 0);
						} else if (m->valid == 0 && m->bmapped == 0) {
							vm_page_free(m);
						}
						splx(s);
						if (bufferdestroyed)
							return 0;
						curbpnpages = bp->b_npages;
						goto doretry;
					} else {
						if ((m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (vmio && (bp->b_flags & B_PDWANTED)) {
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (bsize >= PAGE_SIZE) {
					for (i = bp->b_npages; i < curbpnpages; i++) {
						m = bp->b_pages[i];
						if (m->valid == 0) {
							bp->b_flags &= ~B_CACHE;
						}
						m->bmapped++;
						PAGE_WAKEUP(m);
					}
				} else {
					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
						bp->b_flags &= ~B_CACHE;
					bp->b_pages[0]->bmapped++;
					PAGE_WAKEUP(bp->b_pages[0]);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off % PAGE_SIZE;
			}
			bufspace += (newbsize - bp->b_bufsize);
		}
	}
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
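/*
 * Note (illustrative): allocbuf() maintains two sizes on the buffer.
 * b_bufsize is the storage actually attached (rounded to DEV_BSIZE or
 * page boundaries as appropriate), while b_bcount is the byte count the
 * caller requested; getblk() compares b_bcount against the requested
 * size to detect a stale buffer size.
 */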
/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
	if ((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bp->b_dev = NODEV;
			LIST_REMOVE(bp, b_hash);
			LIST_INSERT_HEAD(&invalhash, bp, b_hash);
			wakeup((caddr_t) bp);
		}
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(s);
		return (bp->b_error);
	} else {
		splx(s);
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		struct vnode *vp = bp->b_vp;

		vwakeup(bp);
		/*
		 * wake up a bawrite() sleeper when the write count drains
		 * down to the threshold it is waiting for
		 */
		if (vp && (vp->v_numoutput == (nbuf / 4)) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = (vm_object_t) vp->v_vmdata;
		if (!obj) {
			splx(s);
			return;
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			if (resid > 0) {
				vm_page_set_valid(m, foff, resid);
				vm_page_set_clean(m, foff, resid);
			}
			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, off: %d, foff: %d, resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %d, lblkno: %d\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize, bp->b_lblkno);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			PAGE_WAKEUP(m);
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary --
	 * so no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = (vm_object_t) vp->v_vmdata;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			PAGE_WAKEUP(m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
}
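/*
 * Pairing note (illustrative): vfs_busy_pages() below is called just
 * before VOP_STRATEGY() to mark the buffer's pages busy and raise the
 * object's paging_in_progress count; biodone() -- or vfs_unbusy_pages()
 * above, when the I/O does not complete normally -- reverses that
 * accounting.
 */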
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_test_dirty(m);
				vm_page_protect(m, VM_PROT_READ);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				/*
				 * for reads, substitute bogus_page for any
				 * valid but not fully cached page, so the
				 * device I/O cannot destroy the valid data
				 */
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are dirty.  This is in case of the unlikely circumstance that
 * a buffer has to be destroyed before it is flushed.
 */
void
vfs_dirty_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m, foff, resid);
				vm_page_set_dirty(m, foff, resid);
			}
			PAGE_WAKEUP(m);
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:
		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}

void
bufstats()
{
}