vfs_bio.c revision 9602
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.49 1995/07/17 06:26:07 davidg Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;
/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated assuming all filesystem blocks
 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
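/*
 * Example (illustrative sketch, not part of this revision): a filesystem
 * read path would typically call bread() and hand the buffer back with
 * brelse() when done.  The names "lblkno", "fs_bsize" and "error" below
 * are hypothetical locals shown only to illustrate the calling convention:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	brelse(bp);
 */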
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC | B_DELWRI)) == (B_ASYNC | B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}
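/*
 * Example (illustrative sketch): the three write paths differ in when the
 * data reaches disk and when the caller gets the buffer back.  A caller
 * that has dirtied bp->b_data might do one of:
 *
 *	error = bwrite(bp);	synchronous: waits for completion
 *	bawrite(bp);		asynchronous: I/O started now, buffer
 *				released by biodone/brelse on completion
 *	bdwrite(bp);		delayed: marked B_DELWRI and written later
 *				by the update daemon, a sync, or when the
 *				buffer is reclaimed
 */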
/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE flag
	 * is used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				/* the bogus-page lookup below needs the vnode's VM object */
				obj = bp->b_vp->v_object;
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("brelse: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				/*
				 * Don't invalidate the page if the local
				 * machine has already modified it.  This
				 * is the lesser of two evils, and should
				 * be fixed.
				 */
				if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
					vm_page_test_dirty(m);
					if (m->dirty == 0) {
						vm_page_set_invalid(m, foff, resid);
						if (m->valid == 0)
							vm_page_protect(m, VM_PROT_NONE);
					}
				}
			}
			foff += resid;
			iototal -= resid;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					if (m->flags & PG_WANTED) {
						wakeup((caddr_t) m);
						m->flags &= ~PG_WANTED;
					}
					vm_page_test_dirty(m);
					if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
						vm_page_cache(m);
					} else if ((m->flags & PG_ACTIVE) == 0) {
						vm_page_activate(m);
						m->act_count = 0;
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
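/*
 * Example (illustrative sketch): a caller that wants the contents thrown
 * away rather than cached sets the appropriate flags before releasing:
 *
 *	bp->b_flags |= B_NOCACHE;	invalidate the VM pages too
 *	brelse(bp);			buffer lands on the AGE queue
 *
 * whereas a plain brelse() of a valid buffer queues it on the LRU for
 * likely reuse.
 */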
/*
 * This routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;

		for (i = 1; i < MAXPHYS / size; i++) {
			if ((bpa = incore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
				(B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	(void) VOP_BWRITE(bp);
	splx(s);
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0) {
			splx(s);
			return (bp);
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);

	return (0);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	if ((vp->v_object == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}
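/*
 * Example (illustrative sketch): read-ahead logic like breadn() above
 * checks residency first, so that no buffer header is burned on data
 * that is already cached; "rablkno" and "rabsize" are hypothetical:
 *
 *	if (!inmem(vp, rablkno)) {
 *		rabp = getblk(vp, rablkno, rabsize, 0, 0);
 *		... start async read if (rabp->b_flags & B_CACHE) == 0,
 *		    else brelse(rabp) ...
 *	}
 *
 * incore() alone only finds a constituted buffer; inmem() also finds
 * data held purely as valid VM pages.
 */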
/*
 * Set the dirty range for a buffer -- for NFS -- if the file is mapped
 * and pages have been written to, let it know.  We want the entire
 * range of the buffer to be marked dirty if any of the pages have been
 * written to for consistency with the b_validoff, b_validend set in the
 * nfs write code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = i * PAGE_SIZE;
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1) * PAGE_SIZE;
		offset = boffset + bp->b_pages[0]->offset;
		if (offset >= object->size) {
			boffset = object->size - bp->b_pages[0]->offset;
		}
		if (bp->b_dirtyend < boffset) {
			bp->b_dirtyend = boffset;
		}
	}
}
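/*
 * Example (illustrative sketch): getblk() below is the usual way to get a
 * buffer for a block that is about to be completely overwritten, where
 * reading the old contents from disk would be wasted work; "fs_bsize" is
 * a hypothetical block size:
 *
 *	bp = getblk(vp, lblkno, fs_bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0)
 *		vfs_bio_clrbuf(bp);
 *	... fill bp->b_data, then bdwrite(bp) or bawrite(bp) ...
 */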
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = incore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistencies
		 */
		if (bp->b_bcount != size) {
			allocbuf(bp, size);
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo)
				return NULL;
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{
	int s;
	int newbsize;
	int i;

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
doretry:
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
panic("allocbuf: page changed offset??!!!?"); 1010 bytesinpage = tinc; 1011 if (tinc > (newbsize - toff)) 1012 bytesinpage = newbsize - toff; 1013 if (!vm_page_is_valid(m, toff + off, bytesinpage)) { 1014 bp->b_flags &= ~B_CACHE; 1015 } 1016 if ((m->flags & PG_ACTIVE) == 0) { 1017 vm_page_activate(m); 1018 m->act_count = 0; 1019 } 1020 continue; 1021 } 1022 m = vm_page_lookup(obj, objoff); 1023 if (!m) { 1024 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 1025 if (!m) { 1026 int j; 1027 1028 for (j = bp->b_npages; j < pageindex; j++) { 1029 PAGE_WAKEUP(bp->b_pages[j]); 1030 } 1031 VM_WAIT; 1032 curbpnpages = bp->b_npages; 1033 goto doretry; 1034 } 1035 vm_page_activate(m); 1036 m->act_count = 0; 1037 m->valid = 0; 1038 } else if (m->flags & PG_BUSY) { 1039 int j; 1040 1041 for (j = bp->b_npages; j < pageindex; j++) { 1042 PAGE_WAKEUP(bp->b_pages[j]); 1043 } 1044 1045 s = splbio(); 1046 m->flags |= PG_WANTED; 1047 tsleep(m, PRIBIO, "pgtblk", 0); 1048 splx(s); 1049 1050 curbpnpages = bp->b_npages; 1051 goto doretry; 1052 } else { 1053 int pb; 1054 if ((curproc != pageproc) && 1055 (m->flags & PG_CACHE) && 1056 (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) { 1057 pagedaemon_wakeup(); 1058 } 1059 bytesinpage = tinc; 1060 if (tinc > (newbsize - toff)) 1061 bytesinpage = newbsize - toff; 1062 if (!vm_page_is_valid(m, toff + off, bytesinpage)) { 1063 bp->b_flags &= ~B_CACHE; 1064 } 1065 if ((m->flags & PG_ACTIVE) == 0) { 1066 vm_page_activate(m); 1067 m->act_count = 0; 1068 } 1069 m->flags |= PG_BUSY; 1070 } 1071 bp->b_pages[pageindex] = m; 1072 curbpnpages = pageindex + 1; 1073 } 1074 if (bsize >= PAGE_SIZE) { 1075 for (i = bp->b_npages; i < curbpnpages; i++) { 1076 m = bp->b_pages[i]; 1077 if (m->valid == 0) { 1078 bp->b_flags &= ~B_CACHE; 1079 } 1080 m->bmapped++; 1081 PAGE_WAKEUP(m); 1082 } 1083 } else { 1084 if (!vm_page_is_valid(bp->b_pages[0], off, bsize)) 1085 bp->b_flags &= ~B_CACHE; 1086 bp->b_pages[0]->bmapped++; 1087 PAGE_WAKEUP(bp->b_pages[0]); 1088 } 1089 bp->b_npages = curbpnpages; 1090 bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE; 1091 pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages); 1092 bp->b_data += off % PAGE_SIZE; 1093 } 1094 } 1095 } 1096 bufspace += (newbsize - bp->b_bufsize); 1097 bp->b_bufsize = newbsize; 1098 bp->b_bcount = size; 1099 return 1; 1100} 1101 1102/* 1103 * Wait for buffer I/O completion, returning error status. 1104 */ 1105int 1106biowait(register struct buf * bp) 1107{ 1108 int s; 1109 1110 s = splbio(); 1111 while ((bp->b_flags & B_DONE) == 0) 1112 tsleep((caddr_t) bp, PRIBIO, "biowait", 0); 1113 splx(s); 1114 if (bp->b_flags & B_EINTR) { 1115 bp->b_flags &= ~B_EINTR; 1116 return (EINTR); 1117 } 1118 if (bp->b_flags & B_ERROR) { 1119 return (bp->b_error ? bp->b_error : EIO); 1120 } else { 1121 return (0); 1122 } 1123} 1124 1125/* 1126 * Finish I/O on a buffer, calling an optional function. 1127 * This is usually called from interrupt level, so process blocking 1128 * is not *a good idea*. 
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			splx(s);
			return;
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do
			 * this here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_valid(m, foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m, foff & (PAGE_SIZE - 1), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O
			 * methods, this is the most common error that pops
			 * up.  if you see this, you have not set the page
			 * busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "off: %ld, foff: %ld, "
				    "resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %ld, lblkno: %ld\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize,
				    bp->b_lblkno);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary -
	 * so no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t) bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED))
				wakeup((caddr_t) m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * count is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
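/*
 * Example (illustrative sketch): every VMIO transfer brackets its pages
 * with the two routines above.  If the I/O cannot be completed after
 * vfs_busy_pages() has been called, the pages must be unbusied by hand;
 * the error-checked strategy call below is hypothetical, since the code
 * in this file does not check the return value:
 *
 *	vfs_busy_pages(bp, 0);		read case: clear_modify == 0
 *	error = VOP_STRATEGY(bp);
 *	if (error)
 *		vfs_unbusy_pages(bp);
 */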
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff =
		    bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m,
				    foff & (PAGE_SIZE - 1), resid);
				vm_page_set_clean(m,
				    foff & (PAGE_SIZE - 1), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int j;

			if (bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
				for (j = 0; j < bp->b_bufsize / DEV_BSIZE; j++) {
					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}