vfs_bio.c revision 43043
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.195 1999/01/21 09:19:33 dillon Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

#if 0	/* replaced by sched_sync */
static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
#endif

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = {0};

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to allow all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer.
 * Must be called only at splbio or higher,
 * as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
	    (vm_offset_t) bp->b_kvabase,
	    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (biowait(bp));
	}
	return (0);
}
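
/*
 * [Editor's note] A minimal sketch of the usual bread() consumer pattern,
 * for a hypothetical filesystem read path (lbn, fs_bsize, off, len and dst
 * are illustrative names, not from this file):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, fs_bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_data + off, dst, len);
 *	bqrelse(bp);
 */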

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct vnode *vp;
	struct mount *mp;


	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0)
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	s = splbio();
	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL)
			if ((oldflags & B_ASYNC) == 0)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);
		brelse(bp);
		return (rtval);
	}
	return (0);
}

void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}
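
/*
 * [Editor's note] vfs_bio_need_satisfy() is the producer side of the
 * needsbuffer protocol: it runs as buffers become free or reusable and
 * clears whichever VFS_BIO_NEED_* bits have been satisfied before waking
 * the sleepers in getnewbuf() and waitfreebuffers(), which tsleep on
 * &needsbuffer.
 */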

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{
	struct vnode *vp;

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure that the filesystem needs -- is still in memory now,
	 * it is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * XXX The soft dependency code is not prepared to
	 * have I/O done when a bdwrite is requested.  For
	 * now we just let the write be delayed if it is
	 * requested by the soft dependency code.
	 */
	if ((vp = bp->b_vp) &&
	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
	      (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
	     (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
		return;

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}


/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Check how this compares with vfs_setdirty(); XXX [JRE]
 */
void
bdirty(bp)
	struct buf *bp;
{

	bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, and flag it so that the device will write
 * it in the order it was queued.  The buffer is released when the output
 * completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED|B_ASYNC;
	return (VOP_BWRITE(bp));
}
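
/*
 * [Editor's note] A sketch of how callers typically choose among the write
 * flavors above (doingasync and dodelay are hypothetical policy flags):
 *
 *	if (doingasync)
 *		bawrite(bp);		-- write-behind, released on completion
 *	else if (dodelay)
 *		bdwrite(bp);		-- just mark dirty, write on sync
 *	else
 *		error = bwrite(bp);	-- synchronous, error visible here
 */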

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp, NULL);
		return;
	}

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
		bp->b_flags &= ~B_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */

	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate*
	 * the buffer, but the VM object is kept around.  The B_NOCACHE flag
	 * is used to invalidate the pages in the VM object.
	 *
	 * The b_{validoff,validend,dirtyoff,dirtyend} values are relative
	 * to b_offset and currently have byte granularity, whereas the
	 * valid flags in the vm_pages have only DEV_BSIZE resolution.
	 * The byte resolution fields are used to avoid unnecessary re-reads
	 * of the buffer but the code really needs to be genericized so
	 * other filesystem modules can take advantage of these fields.
	 *
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * for block sizes that are less than PAGE_SIZE, the b_data
		 * base of the buffer does not represent exactly b_offset and
		 * neither b_offset nor b_size are necessarily page aligned.
		 * Instead, the starting position of b_offset is:
		 *
		 *	b_data + (b_offset & PAGE_MASK)
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
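
/*
 * [Editor's note] brelse() is the fully general release path: it can
 * invalidate the buffer, tear down its VM backing via vfs_vmio_release(),
 * and requeue onto the EMPTY/AGE/LOCKED/LRU queues as appropriate.
 * bqrelse(), below, is the cheap path for a buffer whose contents remain
 * valid and cacheable; callers that merely examined a buffer should
 * prefer it.
 */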

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
	    B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i, s;
	vm_page_t m;

	s = splvm();
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	splx(s);
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
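
/*
 * [Editor's note] gbincore() walks the hash chain without raising the
 * interrupt priority level itself; callers are expected to be at splbio()
 * already, as incore() below demonstrates.
 */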

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}

	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
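
/*
 * [Editor's note] Worked example for the cluster sizing above, assuming
 * f_iosize is 8K and MAXPHYS is 128K (typical values for i386 of this era):
 * maxcl = 128K / 8K = 16, so up to 16 logically and physically contiguous
 * B_DELWRI/B_CLUSTEROK buffers can be handed to cluster_wbuild() as a
 * single physical write.
 */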

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
    int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}
	KASSERT(!(bp->b_flags & B_BUSY),
	    ("getnewbuf: busy buffer on free list\n"));
	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		/*
		 * If our delayed write is likely to be used soon, then
		 * recycle back onto the LRU queue.
		 */
		if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
		    (bp->b_lblkno >= blkno) && (maxsize > 0)) {

			if (bp->b_usecount > 0) {
				if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {

					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);

					if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
						TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
						bp->b_usecount--;
						goto start;
					}
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
			}
		}

		/*
		 * Certain layered filesystems can recursively re-enter the
		 * vfs_bio code, due to delayed writes.  This helps keep the
		 * system from deadlocking.
		 */
		if (writerecursion > 0) {
			if (writerecursion > 5) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
				if (bp == NULL) {
					bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
					while (bp) {
						if ((bp->b_flags & B_DELWRI) == 0)
							break;
						bp = TAILQ_NEXT(bp, b_freelist);
					}
				}
				if (bp == NULL)
					panic("getnewbuf: cannot get buffer, infinite recursion failure");
			} else {
				bremfree(bp);
				bp->b_flags |= B_BUSY | B_AGE | B_ASYNC;
				nbyteswritten += bp->b_bufsize;
				++writerecursion;
				VOP_BWRITE(bp);
				--writerecursion;
				if (!slpflag && !slptimeo) {
					return (0);
				}
				goto start;
			}
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL &&
	    bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_offset = NOOFFSET;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 5;
	/* Here, not kern_physio.c, is where this should be done */
	LIST_INIT(&bp->b_dep);

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
		    vm_map_min(buffer_map), maxsize, &addr)) {
			if (kvafreespace > 0) {
				int totfree = 0, freed;
				do {
					freed = 0;
					for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
					    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
						if (bp1->b_kvasize != 0) {
							totfree += bp1->b_kvasize;
							freed = bp1->b_kvasize;
							bremfree(bp1);
							bfreekva(bp1);
							brelse(bp1);
							break;
						}
					}
				} while (freed);
				/*
				 * if we found free space, then retry with the same buffer.
				 */
				if (totfree)
					goto findkvaspace;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we are above our allocated maximum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
		    addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			vfs_bio_awrite(bp);
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}
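
/*
 * [Editor's note] incore() only finds a constituted buffer header; the
 * data may nevertheless be resident in the VM object without a buffer.
 * inmem(), below, covers that case by probing the object's pages as well.
 */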

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc, size;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	size = PAGE_SIZE;
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		tinc = size;
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset;
#if 0
	vm_offset_t offset;
#endif

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_pages[i]);
		}

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}

		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = max(boffset, 0);
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
#if 0
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
#endif
		boffset = (boffset << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
		if (bp->b_dirtyend < boffset)
			bp->b_dirtyend = min(boffset, bp->b_bufsize);
	}
}
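
/*
 * [Editor's note] A minimal sketch of the getblk()-based read pattern
 * this file itself uses in bread() (lbn and bsize are illustrative):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags |= B_READ;
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(vp, bp);
 *		error = biowait(bp);
 *	}
 */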

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int i, s;
	struct bufhashhdr *bh;

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;

			if (!tsleep(bp,
			    (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
				goto loop;
			}

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies for non-VMIO case.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)
			) {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					VOP_BWRITE(bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						VOP_BWRITE(bp);
					}
				}
				goto loop;
			}
		}

		/*
		 * If the size is inconsistent in the VMIO case, we can resize
		 * the buffer.  This might lead to B_CACHE getting cleared.
		 */

		if (bp->b_bcount != size)
			allocbuf(bp, size);

		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));

		/*
		 * Check that the constituted buffer really deserves to have
		 * the B_CACHE bit set.  B_VMIO type buffers might not
		 * contain fully valid pages.  Normal (old-style) buffers
		 * should be fully valid.  This might also lead to B_CACHE
		 * getting cleared.
		 */
		if ((bp->b_flags & (B_VMIO|B_CACHE)) == (B_VMIO|B_CACHE)) {
			int checksize = bp->b_bufsize;
			int poffset = bp->b_offset & PAGE_MASK;
			int resid;
			for (i = 0; i < bp->b_npages; i++) {
				resid = (checksize > (PAGE_SIZE - poffset)) ?
					(PAGE_SIZE - poffset) : checksize;
				if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
					bp->b_flags &= ~(B_CACHE | B_DONE);
					break;
				}
				checksize -= resid;
				poffset = 0;
			}
		}

		/*
		 * If B_DELWRI is set and B_CACHE got cleared ( or was
		 * already clear ), we have to commit the write and
		 * retry.  The NFS code absolutely depends on this,
		 * and so might the FFS code.  In any case, it formalizes
		 * the B_CACHE rules.  See sys/buf.h.
		 */

		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			VOP_BWRITE(bp);
			goto loop;
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		int bsize, maxsize, vmio;
		off_t offset;

		if (vp->v_type == VBLK)
			bsize = DEV_BSIZE;
		else if (vp->v_mountedhere)
			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
		else if (vp->v_mount)
			bsize = vp->v_mount->mnt_stat.f_iosize;
		else
			bsize = size;

		offset = (off_t)blkno * bsize;
		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
		maxsize = imax(maxsize, bsize);

		if ((bp = getnewbuf(vp, blkno,
		    slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (vmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		splx(s);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}
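
/*
 * [Editor's note] geteblk() simply spins on getnewbuf() until a header is
 * available; because the buffer is marked B_INVAL and stays on invalhash
 * rather than being associated with a vnode, a later brelse() discards it
 * instead of caching it.
 */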

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).  This code is able to
 * resize a buffer up or down.
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
 * the caller.  Calling this code willy nilly can result in the loss of data.
 */

int
allocbuf(struct buf *bp, int size)
{
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (size == 0) ? 0 :
		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					KASSERT(m != bogus_page,
					    ("allocbuf: bogus page found"));
					while (vm_page_sleep_busy(m, TRUE, "biodep"))
						;

					bp->b_pages[i] = NULL;
					vm_page_unwire(m, 0);
				}
				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;
			int orig_validoff = bp->b_validoff;
			int orig_validend = bp->b_validend;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;

				off = bp->b_offset;
				KASSERT(bp->b_offset != NOOFFSET,
				    ("allocbuf: no buffer offset"));
				curbpnpages = bp->b_npages;
doretry:
				bp->b_validoff = orig_validoff;
				bp->b_validend = orig_validend;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					objoff = OFF_TO_IDX(off + toff);
					pageindex = objoff - OFF_TO_IDX(off);
					tinc = PAGE_SIZE - ((off + toff) & PAGE_MASK);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset?!!!?");
#endif
						if (tinc > (newbsize - toff))
							tinc = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, tinc, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							vm_pageout_deficit += (desiredpages - curbpnpages);
							goto doretry;
						}

						vm_page_wire(m);
						vm_page_wakeup(m);
						bp->b_flags &= ~B_CACHE;

					} else if (vm_page_sleep_busy(m, FALSE, "pgtblk")) {
						/*
						 * If we had to sleep, retry.
						 *
						 * Also note that we only test
						 * PG_BUSY here, not m->busy.
						 *
						 * We cannot sleep on m->busy
						 * here because a vm_fault ->
						 * getpages -> cluster-read ->
						 * ...-> allocbuf sequence
						 * will convert PG_BUSY to
						 * m->busy so we have to let
						 * m->busy through if we do
						 * not want to deadlock.
						 */
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						if (tinc > (newbsize - toff))
							tinc = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, tinc, m);
						vm_page_flag_clear(m, PG_ZERO);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				bp->b_data = (caddr_t) ((vm_offset_t) bp->b_data |
				    (off & PAGE_MASK));
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		if (bp->b_flags & B_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, PRIBIO, "biowr", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
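
/*
 * [Editor's note] biowait() return conventions, as implemented above:
 * EINTR if the I/O was interrupted (B_EINTR), bp->b_error (or EIO if the
 * driver left b_error zero) when B_ERROR is set, and 0 on clean completion.
 */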

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if (bp->b_flags & B_FREEBUF) {
		brelse(bp);
		splx(s);
		return;
	}

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("biodone: no buffer offset"));

#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					vm_object_pip_subtract(obj, 1);
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf(
"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
				    (unsigned long)foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;

			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			vm_page_io_finish(m);
			vm_object_pip_subtract(obj, 1);
			foff += resid;
			iosize -= resid;
		}
		if (obj)
			vm_object_pip_wakeupn(obj, 0);
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

#if 0	/* not with kirks code */
static int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

#endif


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
			}
			vm_object_pip_subtract(obj, 1);
			vm_page_flag_clear(m, PG_ZERO);
			vm_page_io_finish(m);
		}
		vm_object_pip_wakeupn(obj, 0);
	}
}
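
/*
 * [Editor's note] vfs_unbusy_pages() is the undo operation for
 * vfs_busy_pages() below: a strategy routine that cannot perform the I/O
 * calls it instead of biodone() so the pages' busy counts and the object's
 * paging_in_progress count are restored.
 */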

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 *
 * XXX we have to set the valid & clean bits for all page fragments
 * touched by b_validoff/validend, even if the page fragment goes somewhat
 * beyond b_validoff/validend due to alignment.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
	if (eoff > bp->b_offset + bp->b_bufsize)
		eoff = bp->b_offset + bp->b_bufsize;
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		vm_page_set_invalid(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
		sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		ev = (bp->b_offset + bp->b_validend + (DEV_BSIZE - 1)) &
		    ~(DEV_BSIZE - 1);
		soff = qmax(sv, soff);
		eoff = qmin(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and to treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object's paging_in_progress
 * count is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i, bogus;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_busy_pages: no buffer offset"));
		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
				goto retry;
		}

		bogus = 0;
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (m->valid == VM_PAGE_BITS_ALL &&
			    (bp->b_flags & B_CACHE) == 0) {
				bp->b_pages[i] = bogus_page;
				bogus++;
			}
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}
		if (bogus)
			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
	}
}
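/*
 * Worked example for the sv/ev computation in vfs_page_set_valid()
 * above (assuming DEV_BSIZE is 512): the NFS-valid byte range is
 * rounded to DEV_BSIZE boundaries, rounding the start up so a
 * partially-valid leading chunk is skipped, and rounding the end up so
 * a partially-valid trailing chunk is included, per the XXX note.
 * With b_offset = 8192, b_validoff = 100 and b_validend = 700:
 * sv = (8192 + 100 + 511) & ~511 = 8704 and
 * ev = (8192 + 700 + 511) & ~511 = 9216, so exactly one 512-byte chunk
 * is marked valid and clean after clamping to the page.
 */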

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_clean_pages: no buffer offset"));
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			vfs_page_set_valid(bp, foff, i, m);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}
	}
}

/*
 * Clear a buffer's data area.  For VMIO buffers, avoid bzero'ing
 * DEV_BSIZE chunks whose valid bits are already set, and skip the
 * bzero entirely when the backing page is known to be pre-zeroed
 * (PG_ZERO).
 */
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i, mask = 0;
	caddr_t sa, ea;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
		    (bp->b_offset & PAGE_MASK) == 0) {
			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    ((bp->b_pages[0]->valid & mask) != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid |= mask;
			bp->b_resid = 0;
			return;
		}
		ea = sa = bp->b_data;
		for (i = 0; i < bp->b_npages; i++, sa = ea) {
			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
			ea = (caddr_t)ulmin((u_long)ea,
			    (u_long)bp->b_data + bp->b_bufsize);
			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
			if ((bp->b_pages[i]->valid & mask) == mask)
				continue;
			if ((bp->b_pages[i]->valid & mask) == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(sa, ea - sa);
				}
			} else {
				for (; sa < ea; sa += DEV_BSIZE, j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(sa, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid |= mask;
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages() and vm_hold_free_pages() get pages into and out
 * of a buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_npages = index;
}
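/*
 * Worked example for the mask computation in vfs_bio_clrbuf() above
 * (assuming DEV_BSIZE is 512 and PAGE_SIZE is 4096): for a buffer whose
 * data starts page-aligned with b_bufsize = 2048, the single-page fast
 * path computes mask = (1 << (2048 / 512)) - 1 = 0x0f, covering the
 * first four DEV_BSIZE chunks of the page.  The bzero is performed only
 * when the page is neither pre-zeroed (PG_ZERO) nor already fully valid
 * for those chunks, after which the chunks are marked valid.
 */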

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
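/*
 * Usage note for the DDB command above: from the in-kernel debugger it
 * is invoked as
 *
 *	db> show buffer <addr>
 *
 * where <addr> is the address of a struct buf; invoking it without an
 * address prints the usage line shown in the code.
 */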