vfs_bio.c revision 31493
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.136 1997/12/01 19:04:00 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
		vm_offset_t off, vm_offset_t size,
		vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
		int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly for
 * average (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 6 + 20;
	lodirtybuffers = nbuf / 12 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
		((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
	    (vm_offset_t) bp->b_kvabase,
	    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
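	/*
	 * A buffer counts as free if it is invalid or is neither
	 * delayed-write nor locked; one such buffer is now leaving
	 * the free queues.
	 */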
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

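/*
 * A buffer has been made available for reuse: account for it and wake
 * up any process sleeping in getnewbuf() or waitfreebuffers(), clearing
 * whichever needsbuffer conditions are now satisfied.
 */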
void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough
	 * memory to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 *     layer can deal with ordered
	 *     writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE
	 * flag is used to invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
			B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, without invalidating its contents or releasing its
 * VM pages as brelse() may; the buffer is simply put back on the
 * appropriate queue.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
			B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

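/*
 * Release the VM pages backing a VMIO buffer: unwire each page and,
 * for synchronous frees, cache or deactivate valid pages and free
 * invalid ones; then unmap the pages from the buffer's kva and drop
 * the vnode association.  Called from brelse() and getnewbuf().
 */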
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * On an async free we cannot place pages onto the
			 * cache queue, so we don't modify any queues at all.
			 * This is probably in error (for perf reasons), and
			 * we will eventually need to build a more complete
			 * infrastructure to support I/O rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
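	/*
	 * writerecursion is nonzero only while we are inside the
	 * vfs_bio_awrite() call below; if that call winds its way back
	 * into getnewbuf(), scan the queues for a buffer that is not
	 * delayed-write rather than starting yet another write.
	 */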
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
		    vm_map_min(buffer_map), maxsize, &addr)) {
			for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
			    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist))
				if (bp1->b_kvasize != 0) {
					bremfree(bp1);
					bfreekva(bp1);
					brelse(bp1);
					goto findkvaspace;
				}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have exceeded our buffer space limit.
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
		    addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

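/*
 * Wait for the free buffer count to recover.  Flushes dirty buffers,
 * which indirectly makes buffers available, and may sleep on
 * needsbuffer.  Called from getblk() when numfreebuffers drops below
 * lofreebuffers.
 */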
static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

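/*
 * Write out delayed-write buffers until numdirtybuffers falls below
 * lodirtybuffers.  A static pid serializes flushing: if another process
 * is already flushing, wait for it to finish rather than starting a
 * second flush.
 */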
static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			splx(s);
			vfs_bio_awrite(bp);
			s = splbio();
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
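				/*
				 * The loop below can block (in VM_WAIT or
				 * waiting for a busy page); pages may change
				 * while we sleep, so after any such sleep we
				 * restart here and rebuild the page list.
				 */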
	doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		tsleep(bp, curproc->p_usrpri, "biowait", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VVMIO) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

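/*
 * Return the number of buffers on the locked queue.
 */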
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

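/*
 * The update daemon itself.  Sync all filesystems every
 * vfs_update_interval seconds, or sooner when an internal sync is
 * requested by waking vfs_update_wakeup (as the sysctl handler
 * below does).
 */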
static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
	    (vm_offset_t) ((foff + off) & PAGE_MASK),
	    size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
	    (vm_offset_t) (soff & PAGE_MASK),
	    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

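/*
 * Clear a buffer's data area, zeroing only those DEV_BSIZE pieces
 * whose backing pages are not already marked valid; non-VMIO buffers
 * are simply cleared with clrbuf().
 */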
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
	    "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
	    "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
	    "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
	    "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
			    VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */