vfs_bio.c revision 30994
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.132 1997/10/28 15:58:24 bde Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Reduce the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 6 + 20;
	lodirtybuffers = nbuf / 12 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}
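/*
 * Worked example of the sizing above (illustrative only, taking
 * nbuf = 1000 and taking DFLTBSIZE as the 8K the comment above implies):
 *
 *	maxbufspace       = (1000 + 8) * 8192 = 8257536 bytes (~8MB)
 *	maxvmiobufspace   = 2 * 8257536 / 3   = 5505024 bytes
 *	maxbufmallocspace = 8257536 / 20      = ~403KB
 *	hidirtybuffers / lodirtybuffers       = 186 / 93
 *	lofreebuffers / hifreebuffers         = 60 / 120
 */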
/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
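/*
 * Usage sketch (illustrative, not part of this file): the canonical
 * synchronous read path as a filesystem might issue it; `vp', `lbn',
 * `bsize' and `cred' are assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, cred, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... consume bp->b_data ...
 *	brelse(bp);
 *
 * A buffer is handed back through *bpp even when the I/O fails, so
 * both paths must release it.
 */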
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure that the filesystem needs -- is still in memory now,
	 * it is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}
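/*
 * Usage sketch (illustrative): the three ways a caller typically
 * disposes of a modified buffer, in increasing order of laziness:
 *
 *	bwrite(bp);	synchronous -- sleeps until the write is done
 *	bawrite(bp);	asynchronous -- starts the write, returns at once
 *	bdwrite(bp);	delayed -- only marks B_DELWRI; the update
 *			daemon or a later flush pushes it out
 *
 * Metadata that must be stable before an operation returns is usually
 * bwrite()n, while partially filled data blocks are bdwrite()n in the
 * hope that they fill further before the physical write happens.
 */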
/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 * layer can deal with ordered
	 * writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
		(bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE flag
	 * is used to invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		bp->b_vp->v_type != VBLK &&
		(bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
/*
 * Release a buffer back to the appropriate queue, but do not try to
 * free it.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
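/*
 * Note (illustrative): bqrelse() is the cheap release for a buffer
 * whose contents remain valid -- it only requeues the buffer, while
 * brelse() additionally performs the B_INVAL/B_ERROR/B_RELBUF rundown
 * above and may dissolve the buffer entirely.  A read-mostly consumer
 * would typically do:
 *
 *	if (bread(vp, lbn, bsize, cred, &bp) == 0) {
 *		... inspect bp->b_data ...
 *		bqrelse(bp);		keep the contents cached
 *	} else {
 *		brelse(bp);		error: let brelse do the rundown
 *	}
 */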
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
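/*
 * Worked example of the contiguity test above (illustrative): with an
 * 8K filesystem block (size = 8192) and the usual 512 byte device
 * sector (DEV_BSHIFT = 9), (i * size) >> DEV_BSHIFT is i * 16, so
 * buffer i may join the cluster only when
 * bpa->b_blkno == bp->b_blkno + i * 16, i.e. when the blocks sit on
 * physically consecutive sectors of the device.
 */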
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we are below our allocated minimum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}
static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			splx(s);
			vfs_bio_awrite(bp);
			s = splbio();
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
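/*
 * Usage sketch (illustrative): read-ahead logic such as breadn()
 * above uses inmem() to skip blocks that need no I/O at all:
 *
 *	if (!inmem(vp, rablkno)) {
 *		rabp = getblk(vp, rablkno, rabsize, 0, 0);
 *		if ((rabp->b_flags & B_CACHE) == 0)
 *			... start an async read on rabp ...
 *		else
 *			brelse(rabp);
 *	}
 *
 * incore() is the cheaper test; it only finds an existing buffer and
 * does not hunt through the VM object for resident pages.
 */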
/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}
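/*
 * Worked example (illustrative): for a 4-page VMIO buffer in which
 * only pages 1 and 2 test dirty, the two scans above produce
 * b_dirtyoff = 1 << PAGE_SHIFT and b_dirtyend = 3 << PAGE_SHIFT --
 * the smallest page-aligned range covering every modified page --
 * assuming the previous b_dirtyoff/b_dirtyend did not already span a
 * wider range.
 */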
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;
	static pid_t flushing = 0;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
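/*
 * Usage sketch (illustrative): allocating a block that will be
 * completely overwritten, so reading it from disk would be wasted
 * work; the names are the caller's.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0)
 *		vfs_bio_clrbuf(bp);	zero-fill instead of reading
 *	... fill bp->b_data ...
 *	bdwrite(bp);
 *
 * geteblk() serves callers that want raw buffer space with no vnode
 * or disk address attached at all.
 */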
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}
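/*
 * Usage sketch (illustrative): resizing a block in place, e.g. when
 * the last fragment of a file grows.  The caller must already hold
 * the buffer busy, which getblk() arranges:
 *
 *	bp = getblk(vp, lbn, osize, 0, 0);
 *	allocbuf(bp, nsize);		grow, preserving existing data
 *
 * getblk() itself calls allocbuf() the same way to repair size
 * mismatches, so explicit calls are only needed when a block
 * legitimately changes size.
 */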
/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		tsleep(bp, curproc->p_usrpri, "biowait", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
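/*
 * Usage sketch (illustrative): the two completion styles layered on
 * biodone() below.  A synchronous caller sleeps in biowait():
 *
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 *
 * An asynchronous caller sets B_CALL and b_iodone instead, and
 * biodone() invokes the callback from interrupt level (`mydone' is a
 * hypothetical completion routine):
 *
 *	bp->b_flags |= B_ASYNC | B_CALL;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */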
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VVMIO) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
				     (vm_offset_t) ((foff + off) & PAGE_MASK),
				     size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
			    (vm_offset_t) (soff & PAGE_MASK),
			    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
				       (vm_offset_t) (soff & PAGE_MASK),
				       (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}
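/*
 * Usage sketch (illustrative): the page-busy protocol around a
 * strategy call, as bread()/bwrite() above use it:
 *
 *	vfs_busy_pages(bp, 0);		0 for a read, 1 for a write
 *	VOP_STRATEGY(bp);
 *	...				biodone() unbusies the pages
 *
 * vfs_unbusy_pages() is the manual counterpart for the abort path,
 * where the I/O never completes through biodone().
 */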
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
			    VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */