vfs_bio.c revision 31016
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.133 1997/11/06 19:29:29 phk Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 6 + 20;
	lodirtybuffers = nbuf / 12 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
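
/*
 * Illustrative usage (a sketch, not part of the original file): a
 * typical synchronous read path in a filesystem.  The names "lblkno"
 * and "bsize" below are hypothetical caller-side variables.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use the contents via bp->b_data...
 *	brelse(bp);
 */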

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough
	 * memory to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 * layer can deal with ordered
	 * writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue, and we don't modify
			 * any queues.  This is probably in error (for perf
			 * reasons), and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}
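
/*
 * Illustrative note (a sketch, not part of the original file): brelse()
 * is the fully general release path -- it handles invalidation, VMIO
 * page rundown, and requeueing -- while bqrelse() is a cheaper variant
 * for buffers whose contents remain valid, e.g.
 *
 *	if (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF))
 *		brelse(bp);	may tear the buffer down
 *	else
 *		bqrelse(bp);	just requeue on the LRU queue
 *
 * This mirrors the choice biodone() makes below for async completions.
 */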

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
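
/*
 * Worked example (illustrative, not from the original file): in the
 * contiguity check above, with a filesystem block size of 8192 and
 * DEV_BSIZE = 512 (DEV_BSHIFT = 9), (i * size) >> DEV_BSHIFT is
 * i * 16, so logical block lblkno + i only extends the cluster when
 * its device block sits exactly i * 16 device blocks past
 * bp->b_blkno.  Up to MAXPHYS / 8192 such buffers can then be
 * combined into one physical write by cluster_wbuild().
 */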

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
		    vm_map_min(buffer_map), maxsize, &addr)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have gone over our buffer space limit
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
		    addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			splx(s);
			vfs_bio_awrite(bp);
			s = splbio();
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
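
/*
 * Illustrative usage (a sketch, not part of the original file):
 * read-ahead code such as breadn() above consults these predicates
 * before issuing I/O:
 *
 *	if (inmem(vp, rablkno))
 *		continue;	data already resident, skip the read
 *	rabp = getblk(vp, rablkno, rabsize, 0, 0);
 *
 * incore() only finds an actual buffer; inmem() also succeeds when the
 * data is resident solely in VM pages of a VMIO-backed vnode.
 */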
We want the 1217 * entire range of the buffer to be marked dirty if 1218 * any of the pages have been written to for consistancy 1219 * with the b_validoff, b_validend set in the nfs write 1220 * code, and used by the nfs read code. 1221 */ 1222static void 1223vfs_setdirty(struct buf *bp) { 1224 int i; 1225 vm_object_t object; 1226 vm_offset_t boffset, offset; 1227 /* 1228 * We qualify the scan for modified pages on whether the 1229 * object has been flushed yet. The OBJ_WRITEABLE flag 1230 * is not cleared simply by protecting pages off. 1231 */ 1232 if ((bp->b_flags & B_VMIO) && 1233 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) { 1234 /* 1235 * test the pages to see if they have been modified directly 1236 * by users through the VM system. 1237 */ 1238 for (i = 0; i < bp->b_npages; i++) 1239 vm_page_test_dirty(bp->b_pages[i]); 1240 1241 /* 1242 * scan forwards for the first page modified 1243 */ 1244 for (i = 0; i < bp->b_npages; i++) { 1245 if (bp->b_pages[i]->dirty) { 1246 break; 1247 } 1248 } 1249 boffset = (i << PAGE_SHIFT); 1250 if (boffset < bp->b_dirtyoff) { 1251 bp->b_dirtyoff = boffset; 1252 } 1253 1254 /* 1255 * scan backwards for the last page modified 1256 */ 1257 for (i = bp->b_npages - 1; i >= 0; --i) { 1258 if (bp->b_pages[i]->dirty) { 1259 break; 1260 } 1261 } 1262 boffset = (i + 1); 1263 offset = boffset + bp->b_pages[0]->pindex; 1264 if (offset >= object->size) 1265 boffset = object->size - bp->b_pages[0]->pindex; 1266 if (bp->b_dirtyend < (boffset << PAGE_SHIFT)) 1267 bp->b_dirtyend = (boffset << PAGE_SHIFT); 1268 } 1269} 1270 1271/* 1272 * Get a block given a specified block and offset into a file/device. 1273 */ 1274struct buf * 1275getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1276{ 1277 struct buf *bp; 1278 int s; 1279 struct bufhashhdr *bh; 1280 int maxsize; 1281 1282 if (vp->v_mount) { 1283 maxsize = vp->v_mount->mnt_stat.f_iosize; 1284 /* 1285 * This happens on mount points. 1286 */ 1287 if (maxsize < size) 1288 maxsize = size; 1289 } else { 1290 maxsize = size; 1291 } 1292 1293#if !defined(MAX_PERF) 1294 if (size > MAXBSIZE) 1295 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 1296#endif 1297 1298 s = splbio(); 1299loop: 1300 if (numfreebuffers < lofreebuffers) { 1301 waitfreebuffers(slpflag, slptimeo); 1302 } 1303 1304 if ((bp = gbincore(vp, blkno))) { 1305 if (bp->b_flags & B_BUSY) { 1306 bp->b_flags |= B_WANTED; 1307 if (bp->b_usecount < BUF_MAXUSE) 1308 ++bp->b_usecount; 1309 if (!tsleep(bp, 1310 (PRIBIO + 1) | slpflag, "getblk", slptimeo)) 1311 goto loop; 1312 1313 splx(s); 1314 return (struct buf *) NULL; 1315 } 1316 bp->b_flags |= B_BUSY | B_CACHE; 1317 bremfree(bp); 1318 1319 /* 1320 * check for size inconsistancies (note that they shouldn't 1321 * happen but do when filesystems don't handle the size changes 1322 * correctly.) We are conservative on metadata and don't just 1323 * extend the buffer but write and re-constitute it. 
1324 */ 1325 1326 if (bp->b_bcount != size) { 1327 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) { 1328 allocbuf(bp, size); 1329 } else { 1330 bp->b_flags |= B_NOCACHE; 1331 VOP_BWRITE(bp); 1332 goto loop; 1333 } 1334 } 1335 1336 if (bp->b_usecount < BUF_MAXUSE) 1337 ++bp->b_usecount; 1338 splx(s); 1339 return (bp); 1340 } else { 1341 vm_object_t obj; 1342 1343 if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) { 1344 if (slpflag || slptimeo) { 1345 splx(s); 1346 return NULL; 1347 } 1348 goto loop; 1349 } 1350 1351 /* 1352 * This code is used to make sure that a buffer is not 1353 * created while the getnewbuf routine is blocked. 1354 * Normally the vnode is locked so this isn't a problem. 1355 * VBLK type I/O requests, however, don't lock the vnode. 1356 */ 1357 if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) { 1358 bp->b_flags |= B_INVAL; 1359 brelse(bp); 1360 goto loop; 1361 } 1362 1363 /* 1364 * Insert the buffer into the hash, so that it can 1365 * be found by incore. 1366 */ 1367 bp->b_blkno = bp->b_lblkno = blkno; 1368 bgetvp(vp, bp); 1369 LIST_REMOVE(bp, b_hash); 1370 bh = BUFHASH(vp, blkno); 1371 LIST_INSERT_HEAD(bh, bp, b_hash); 1372 1373 if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) { 1374 bp->b_flags |= (B_VMIO | B_CACHE); 1375#if defined(VFS_BIO_DEBUG) 1376 if (vp->v_type != VREG && vp->v_type != VBLK) 1377 printf("getblk: vmioing file type %d???\n", vp->v_type); 1378#endif 1379 } else { 1380 bp->b_flags &= ~B_VMIO; 1381 } 1382 splx(s); 1383 1384 allocbuf(bp, size); 1385#ifdef PC98 1386 /* 1387 * 1024byte/sector support 1388 */ 1389#define B_XXX2 0x8000000 1390 if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2; 1391#endif 1392 return (bp); 1393 } 1394} 1395 1396/* 1397 * Get an empty, disassociated buffer of given size. 1398 */ 1399struct buf * 1400geteblk(int size) 1401{ 1402 struct buf *bp; 1403 int s; 1404 1405 s = splbio(); 1406 while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0); 1407 splx(s); 1408 allocbuf(bp, size); 1409 bp->b_flags |= B_INVAL; 1410 return (bp); 1411} 1412 1413 1414/* 1415 * This code constitutes the buffer memory from either anonymous system 1416 * memory (in the case of non-VMIO operations) or from an associated 1417 * VM object (in the case of VMIO operations). 1418 * 1419 * Note that this code is tricky, and has many complications to resolve 1420 * deadlock or inconsistant data situations. Tread lightly!!! 1421 * 1422 * Modify the length of a buffer's underlying buffer storage without 1423 * destroying information (unless, of course the buffer is shrinking). 
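
/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * test B_CACHE to decide whether getblk() returned valid cached data or
 * a freshly constituted buffer that must be filled:
 *
 *	bp = getblk(vp, lblkno, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		...read the block from the device, or zero it with
 *		   vfs_bio_clrbuf() for fresh allocation...
 *	}
 *
 * bread() above is exactly this pattern plus the actual device read.
 */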

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		tsleep(bp, curproc->p_usrpri, "biowait", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
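
/*
 * Worked example (illustrative, not from the original file): the
 * newbsize computation in allocbuf() rounds the request up to a
 * DEV_BSIZE boundary; with DEV_BSIZE = 512 and size = 3000,
 *
 *	(3000 + 511) & ~511 = 3072
 *
 * and, for the page-backed cases, round_page(3072) = 4096 on a
 * 4K-page machine, i.e. desiredpages = 1.
 */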

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VVMIO) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
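
/*
 * Illustrative usage (a sketch, not part of the original file): a
 * caller that wants a callback at I/O completion sets B_CALL before
 * starting the I/O; biodone() above then invokes it at interrupt time
 * instead of doing the wakeup itself ("mydone" is a hypothetical
 * handler):
 *
 *	bp->b_iodone = mydone;
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	VOP_STRATEGY(bp);
 *
 * The handler runs at splbio and must not block.
 */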

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
	    (vm_offset_t) ((foff + off) & PAGE_MASK),
	    size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
	    (vm_offset_t) (soff & PAGE_MASK),
	    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
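
/*
 * Illustrative note (a sketch, not part of the original file): the
 * page bookkeeping routines above pair up around device I/O roughly
 * as follows:
 *
 *	vfs_busy_pages(bp, ...);	before VOP_STRATEGY()
 *	VOP_STRATEGY(bp);
 *	...
 *	biodone(bp);			normal completion unbusies pages
 *	vfs_unbusy_pages(bp);		error path, in lieu of biodone()
 *
 * vfs_clean_pages() is the delayed-write variant that marks pages
 * clean without any device I/O having been started.
 */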

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
	    "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
	    "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
	    "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
	    "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
			    VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
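
/*
 * Illustrative usage (a sketch, not part of the original file): from
 * the kernel debugger the command above would be invoked as, e.g.,
 *
 *	db> show buffer 0xf0123456
 *
 * printing the buffer's flags, sizes, device, and backing pages.
 */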