vfs_bio.c revision 45347
1/* 2 * Copyright (c) 1994,1997 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Absolutely no warranty of function or purpose is made by the author 12 * John S. Dyson. 13 * 14 * $Id: vfs_bio.c,v 1.203 1999/03/19 10:17:44 bde Exp $ 15 */ 16 17/* 18 * this file contains a new buffer I/O scheme implementing a coherent 19 * VM object and buffer cache scheme. Pains have been taken to make 20 * sure that the performance degradation associated with schemes such 21 * as this is not realized. 22 * 23 * Author: John S. Dyson 24 * Significant help during the development and debugging phases 25 * had been provided by David Greenman, also of the FreeBSD core team. 26 * 27 * see man buf(9) for more info. 28 */ 29 30#define VMIO 31#include <sys/param.h> 32#include <sys/systm.h> 33#include <sys/sysproto.h> 34#include <sys/kernel.h> 35#include <sys/sysctl.h> 36#include <sys/proc.h> 37#include <sys/vnode.h> 38#include <sys/vmmeter.h> 39#include <sys/lock.h> 40#include <miscfs/specfs/specdev.h> 41#include <vm/vm.h> 42#include <vm/vm_param.h> 43#include <vm/vm_prot.h> 44#include <vm/vm_kern.h> 45#include <vm/vm_pageout.h> 46#include <vm/vm_page.h> 47#include <vm/vm_object.h> 48#include <vm/vm_extern.h> 49#include <vm/vm_map.h> 50#include <sys/buf.h> 51#include <sys/mount.h> 52#include <sys/malloc.h> 53#include <sys/resourcevar.h> 54 55static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); 56 57struct bio_ops bioops; /* I/O operation notification */ 58 59#if 0 /* replaced bu sched_sync */ 60static void vfs_update __P((void)); 61static struct proc *updateproc; 62static struct kproc_desc up_kp = { 63 "update", 64 vfs_update, 65 &updateproc 66}; 67SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 68#endif 69 70struct buf *buf; /* buffer header pool */ 71struct swqueue bswlist; 72 73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from, 74 vm_offset_t to); 75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from, 76 vm_offset_t to); 77static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff, 78 vm_offset_t off, vm_offset_t size, 79 vm_page_t m); 80static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, 81 int pageno, vm_page_t m); 82static void vfs_clean_pages(struct buf * bp); 83static void vfs_setdirty(struct buf *bp); 84static void vfs_vmio_release(struct buf *bp); 85static void flushdirtybuffers(int slpflag, int slptimeo); 86static int flushbufqueues(void); 87 88/* 89 * Internal update daemon, process 3 90 * The variable vfs_update_wakeup allows for internal syncs. 91 */ 92int vfs_update_wakeup; 93 94/* 95 * bogus page -- for I/O to/from partially complete buffers 96 * this is a temporary solution to the problem, but it is not 97 * really that bad. it would be better to split the buffer 98 * for input in the case of buffers partially already in memory, 99 * but the code is intricate enough already. 100 */ 101vm_page_t bogus_page; 102int runningbufspace; 103static vm_offset_t bogus_offset; 104 105static int bufspace, maxbufspace, vmiospace, maxvmiobufspace, 106 bufmallocspace, maxbufmallocspace, hibufspace; 107static int needsbuffer; 108static int numdirtybuffers, lodirtybuffers, hidirtybuffers; 109static int numfreebuffers, lofreebuffers, hifreebuffers; 110static int kvafreespace; 111 112SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, 113 &numdirtybuffers, 0, ""); 114SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, 115 &lodirtybuffers, 0, ""); 116SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, 117 &hidirtybuffers, 0, ""); 118SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, 119 &numfreebuffers, 0, ""); 120SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, 121 &lofreebuffers, 0, ""); 122SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, 123 &hifreebuffers, 0, ""); 124SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, 125 &runningbufspace, 0, ""); 126SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, 127 &maxbufspace, 0, ""); 128SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, 129 &hibufspace, 0, ""); 130SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, 131 &bufspace, 0, ""); 132SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW, 133 &maxvmiobufspace, 0, ""); 134SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD, 135 &vmiospace, 0, ""); 136SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, 137 &maxbufmallocspace, 0, ""); 138SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, 139 &bufmallocspace, 0, ""); 140SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD, 141 &kvafreespace, 0, ""); 142 143static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash; 144struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } }; 145 146extern int vm_swap_size; 147 148#define BUF_MAXUSE 24 149 150#define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 151#define VFS_BIO_NEED_RESERVED02 0x02 /* unused */ 152#define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */ 153#define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 154#define VFS_BIO_NEED_KVASPACE 0x10 /* wait for buffer_map space, emerg */ 155 156/* 157 * kvaspacewakeup: 158 * 159 * Called when kva space is potential available for recovery or when 160 * kva space is recovered in the buffer_map. This function wakes up 161 * anyone waiting for buffer_map kva space. Even though the buffer_map 162 * is larger then maxbufspace, this situation will typically occur 163 * when the buffer_map gets fragmented. 164 */ 165 166static __inline void 167kvaspacewakeup(void) 168{ 169 /* 170 * If someone is waiting for KVA space, wake them up. Even 171 * though we haven't freed the kva space yet, the waiting 172 * process will be able to now. 173 */ 174 if (needsbuffer & VFS_BIO_NEED_KVASPACE) { 175 needsbuffer &= ~VFS_BIO_NEED_KVASPACE; 176 wakeup(&needsbuffer); 177 } 178} 179 180/* 181 * bufspacewakeup: 182 * 183 * Called when buffer space is potentially available for recovery or when 184 * buffer space is recovered. getnewbuf() will block on this flag when 185 * it is unable to free sufficient buffer space. Buffer space becomes 186 * recoverable when bp's get placed back in the queues. 187 */ 188 189static __inline void 190bufspacewakeup(void) 191{ 192 /* 193 * If someone is waiting for BUF space, wake them up. Even 194 * though we haven't freed the kva space yet, the waiting 195 * process will be able to now. 196 */ 197 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 198 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 199 wakeup(&needsbuffer); 200 } 201} 202 203/* 204 * bufcountwakeup: 205 * 206 * Called when a buffer has been added to one of the free queues to 207 * account for the buffer and to wakeup anyone waiting for free buffers. 208 * This typically occurs when large amounts of metadata are being handled 209 * by the buffer cache ( else buffer space runs out first, usually ). 210 */ 211 212static __inline void 213bufcountwakeup(void) 214{ 215 ++numfreebuffers; 216 if (needsbuffer) { 217 needsbuffer &= ~VFS_BIO_NEED_ANY; 218 if (numfreebuffers >= hifreebuffers) 219 needsbuffer &= ~VFS_BIO_NEED_FREE; 220 wakeup(&needsbuffer); 221 } 222} 223 224/* 225 * Initialize buffer headers and related structures. 226 */ 227void 228bufinit() 229{ 230 struct buf *bp; 231 int i; 232 233 TAILQ_INIT(&bswlist); 234 LIST_INIT(&invalhash); 235 236 /* first, make a null hash table */ 237 for (i = 0; i < BUFHSZ; i++) 238 LIST_INIT(&bufhashtbl[i]); 239 240 /* next, make a null set of free lists */ 241 for (i = 0; i < BUFFER_QUEUES; i++) 242 TAILQ_INIT(&bufqueues[i]); 243 244 /* finally, initialize each buffer header and stick on empty q */ 245 for (i = 0; i < nbuf; i++) { 246 bp = &buf[i]; 247 bzero(bp, sizeof *bp); 248 bp->b_flags = B_INVAL; /* we're just an empty header */ 249 bp->b_dev = NODEV; 250 bp->b_rcred = NOCRED; 251 bp->b_wcred = NOCRED; 252 bp->b_qindex = QUEUE_EMPTY; 253 bp->b_xflags = 0; 254 LIST_INIT(&bp->b_dep); 255 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 256 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 257 } 258 259 /* 260 * maxbufspace is currently calculated to support all filesystem 261 * blocks to be 8K. If you happen to use a 16K filesystem, the size 262 * of the buffer cache is still the same as it would be for 8K 263 * filesystems. This keeps the size of the buffer cache "in check" 264 * for big block filesystems. 265 * 266 * maxbufspace is calculated as around 50% of the KVA available in 267 * the buffer_map ( DFLTSIZE vs BKVASIZE ), I presume to reduce the 268 * effect of fragmentation. 269 */ 270 maxbufspace = (nbuf + 8) * DFLTBSIZE; 271 if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE) 272 hibufspace = 3 * maxbufspace / 4; 273/* 274 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed 275 */ 276 maxvmiobufspace = 2 * hibufspace / 3; 277/* 278 * Limit the amount of malloc memory since it is wired permanently into 279 * the kernel space. Even though this is accounted for in the buffer 280 * allocation, we don't want the malloced region to grow uncontrolled. 281 * The malloc scheme improves memory utilization significantly on average 282 * (small) directories. 283 */ 284 maxbufmallocspace = hibufspace / 20; 285 286/* 287 * Reduce the chance of a deadlock occuring by limiting the number 288 * of delayed-write dirty buffers we allow to stack up. 289 */ 290 lodirtybuffers = nbuf / 16 + 10; 291 hidirtybuffers = nbuf / 8 + 20; 292 numdirtybuffers = 0; 293 294/* 295 * Try to keep the number of free buffers in the specified range, 296 * and give the syncer access to an emergency reserve. 297 */ 298 lofreebuffers = nbuf / 18 + 5; 299 hifreebuffers = 2 * lofreebuffers; 300 numfreebuffers = nbuf; 301 302 kvafreespace = 0; 303 304 bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE); 305 bogus_page = vm_page_alloc(kernel_object, 306 ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 307 VM_ALLOC_NORMAL); 308 309} 310 311/* 312 * Free the kva allocation for a buffer 313 * Must be called only at splbio or higher, 314 * as this is the only locking for buffer_map. 315 */ 316static void 317bfreekva(struct buf * bp) 318{ 319 if (bp->b_kvasize) { 320 vm_map_delete(buffer_map, 321 (vm_offset_t) bp->b_kvabase, 322 (vm_offset_t) bp->b_kvabase + bp->b_kvasize 323 ); 324 bp->b_kvasize = 0; 325 kvaspacewakeup(); 326 } 327} 328 329/* 330 * bremfree: 331 * 332 * Remove the buffer from the appropriate free list. 333 */ 334void 335bremfree(struct buf * bp) 336{ 337 int s = splbio(); 338 int old_qindex = bp->b_qindex; 339 340 if (bp->b_qindex != QUEUE_NONE) { 341 if (bp->b_qindex == QUEUE_EMPTY) { 342 kvafreespace -= bp->b_kvasize; 343 } 344 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 345 bp->b_qindex = QUEUE_NONE; 346 runningbufspace += bp->b_bufsize; 347 } else { 348#if !defined(MAX_PERF) 349 panic("bremfree: removing a buffer when not on a queue"); 350#endif 351 } 352 353 /* 354 * Fixup numfreebuffers count. If the buffer is invalid or not 355 * delayed-write, and it was on the EMPTY, LRU, or AGE queues, 356 * the buffer was free and we must decrement numfreebuffers. 357 */ 358 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) { 359 switch(old_qindex) { 360 case QUEUE_EMPTY: 361 case QUEUE_LRU: 362 case QUEUE_AGE: 363 --numfreebuffers; 364 break; 365 default: 366 break; 367 } 368 } 369 splx(s); 370} 371 372 373/* 374 * Get a buffer with the specified data. Look in the cache first. 375 */ 376int 377bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 378 struct buf ** bpp) 379{ 380 struct buf *bp; 381 382 bp = getblk(vp, blkno, size, 0, 0); 383 *bpp = bp; 384 385 /* if not found in cache, do some I/O */ 386 if ((bp->b_flags & B_CACHE) == 0) { 387 if (curproc != NULL) 388 curproc->p_stats->p_ru.ru_inblock++; 389 KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp)); 390 bp->b_flags |= B_READ; 391 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 392 if (bp->b_rcred == NOCRED) { 393 if (cred != NOCRED) 394 crhold(cred); 395 bp->b_rcred = cred; 396 } 397 vfs_busy_pages(bp, 0); 398 VOP_STRATEGY(vp, bp); 399 return (biowait(bp)); 400 } 401 return (0); 402} 403 404/* 405 * Operates like bread, but also starts asynchronous I/O on 406 * read-ahead blocks. 407 */ 408int 409breadn(struct vnode * vp, daddr_t blkno, int size, 410 daddr_t * rablkno, int *rabsize, 411 int cnt, struct ucred * cred, struct buf ** bpp) 412{ 413 struct buf *bp, *rabp; 414 int i; 415 int rv = 0, readwait = 0; 416 417 *bpp = bp = getblk(vp, blkno, size, 0, 0); 418 419 /* if not found in cache, do some I/O */ 420 if ((bp->b_flags & B_CACHE) == 0) { 421 if (curproc != NULL) 422 curproc->p_stats->p_ru.ru_inblock++; 423 bp->b_flags |= B_READ; 424 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 425 if (bp->b_rcred == NOCRED) { 426 if (cred != NOCRED) 427 crhold(cred); 428 bp->b_rcred = cred; 429 } 430 vfs_busy_pages(bp, 0); 431 VOP_STRATEGY(vp, bp); 432 ++readwait; 433 } 434 435 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 436 if (inmem(vp, *rablkno)) 437 continue; 438 rabp = getblk(vp, *rablkno, *rabsize, 0, 0); 439 440 if ((rabp->b_flags & B_CACHE) == 0) { 441 if (curproc != NULL) 442 curproc->p_stats->p_ru.ru_inblock++; 443 rabp->b_flags |= B_READ | B_ASYNC; 444 rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 445 if (rabp->b_rcred == NOCRED) { 446 if (cred != NOCRED) 447 crhold(cred); 448 rabp->b_rcred = cred; 449 } 450 vfs_busy_pages(rabp, 0); 451 VOP_STRATEGY(vp, rabp); 452 } else { 453 brelse(rabp); 454 } 455 } 456 457 if (readwait) { 458 rv = biowait(bp); 459 } 460 return (rv); 461} 462 463/* 464 * Write, release buffer on completion. (Done by iodone 465 * if async.) 466 */ 467int 468bwrite(struct buf * bp) 469{ 470 int oldflags, s; 471 struct vnode *vp; 472 struct mount *mp; 473 474 if (bp->b_flags & B_INVAL) { 475 brelse(bp); 476 return (0); 477 } 478 479 oldflags = bp->b_flags; 480 481#if !defined(MAX_PERF) 482 if ((bp->b_flags & B_BUSY) == 0) 483 panic("bwrite: buffer is not busy???"); 484#endif 485 s = splbio(); 486 bundirty(bp); 487 488 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR); 489 bp->b_flags |= B_WRITEINPROG; 490 491 bp->b_vp->v_numoutput++; 492 vfs_busy_pages(bp, 1); 493 if (curproc != NULL) 494 curproc->p_stats->p_ru.ru_oublock++; 495 splx(s); 496 VOP_STRATEGY(bp->b_vp, bp); 497 498 /* 499 * Collect statistics on synchronous and asynchronous writes. 500 * Writes to block devices are charged to their associated 501 * filesystem (if any). 502 */ 503 if ((vp = bp->b_vp) != NULL) { 504 if (vp->v_type == VBLK) 505 mp = vp->v_specmountpoint; 506 else 507 mp = vp->v_mount; 508 if (mp != NULL) 509 if ((oldflags & B_ASYNC) == 0) 510 mp->mnt_stat.f_syncwrites++; 511 else 512 mp->mnt_stat.f_asyncwrites++; 513 } 514 515 if ((oldflags & B_ASYNC) == 0) { 516 int rtval = biowait(bp); 517 brelse(bp); 518 return (rtval); 519 } 520 521 return (0); 522} 523 524/* 525 * Delayed write. (Buffer is marked dirty). 526 */ 527void 528bdwrite(struct buf * bp) 529{ 530 struct vnode *vp; 531 532#if !defined(MAX_PERF) 533 if ((bp->b_flags & B_BUSY) == 0) { 534 panic("bdwrite: buffer is not busy"); 535 } 536#endif 537 538 if (bp->b_flags & B_INVAL) { 539 brelse(bp); 540 return; 541 } 542 bdirty(bp); 543 544 /* 545 * This bmap keeps the system from needing to do the bmap later, 546 * perhaps when the system is attempting to do a sync. Since it 547 * is likely that the indirect block -- or whatever other datastructure 548 * that the filesystem needs is still in memory now, it is a good 549 * thing to do this. Note also, that if the pageout daemon is 550 * requesting a sync -- there might not be enough memory to do 551 * the bmap then... So, this is important to do. 552 */ 553 if (bp->b_lblkno == bp->b_blkno) { 554 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 555 } 556 557 /* 558 * Set the *dirty* buffer range based upon the VM system dirty pages. 559 */ 560 vfs_setdirty(bp); 561 562 /* 563 * We need to do this here to satisfy the vnode_pager and the 564 * pageout daemon, so that it thinks that the pages have been 565 * "cleaned". Note that since the pages are in a delayed write 566 * buffer -- the VFS layer "will" see that the pages get written 567 * out on the next sync, or perhaps the cluster will be completed. 568 */ 569 vfs_clean_pages(bp); 570 bqrelse(bp); 571 572 /* 573 * XXX The soft dependency code is not prepared to 574 * have I/O done when a bdwrite is requested. For 575 * now we just let the write be delayed if it is 576 * requested by the soft dependency code. 577 */ 578 if ((vp = bp->b_vp) && 579 ((vp->v_type == VBLK && vp->v_specmountpoint && 580 (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) || 581 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP)))) 582 return; 583 584 if (numdirtybuffers >= hidirtybuffers) 585 flushdirtybuffers(0, 0); 586} 587 588/* 589 * bdirty: 590 * 591 * Turn buffer into delayed write request. We must clear B_READ and 592 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to 593 * itself to properly update it in the dirty/clean lists. We mark it 594 * B_DONE to ensure that any asynchronization of the buffer properly 595 * clears B_DONE ( else a panic will occur later ). Note that B_INVALID 596 * buffers are not considered dirty even if B_DELWRI is set. 597 * 598 * Since the buffer is not on a queue, we do not update the numfreebuffers 599 * count. 600 * 601 * Must be called at splbio(). 602 * The buffer must be on QUEUE_NONE. 603 */ 604void 605bdirty(bp) 606 struct buf *bp; 607{ 608 KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 609 bp->b_flags &= ~(B_READ|B_RELBUF); 610 611 if ((bp->b_flags & B_DELWRI) == 0) { 612 bp->b_flags |= B_DONE | B_DELWRI; 613 reassignbuf(bp, bp->b_vp); 614 ++numdirtybuffers; 615 } 616} 617 618/* 619 * bundirty: 620 * 621 * Clear B_DELWRI for buffer. 622 * 623 * Since the buffer is not on a queue, we do not update the numfreebuffers 624 * count. 625 * 626 * Must be called at splbio(). 627 * The buffer must be on QUEUE_NONE. 628 */ 629 630void 631bundirty(bp) 632 struct buf *bp; 633{ 634 KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); 635 636 if (bp->b_flags & B_DELWRI) { 637 bp->b_flags &= ~B_DELWRI; 638 reassignbuf(bp, bp->b_vp); 639 --numdirtybuffers; 640 } 641} 642 643/* 644 * bawrite: 645 * 646 * Asynchronous write. Start output on a buffer, but do not wait for 647 * it to complete. The buffer is released when the output completes. 648 */ 649void 650bawrite(struct buf * bp) 651{ 652 bp->b_flags |= B_ASYNC; 653 (void) VOP_BWRITE(bp); 654} 655 656/* 657 * bowrite: 658 * 659 * Ordered write. Start output on a buffer, and flag it so that the 660 * device will write it in the order it was queued. The buffer is 661 * released when the output completes. 662 */ 663int 664bowrite(struct buf * bp) 665{ 666 bp->b_flags |= B_ORDERED | B_ASYNC; 667 return (VOP_BWRITE(bp)); 668} 669 670/* 671 * brelse: 672 * 673 * Release a busy buffer and, if requested, free its resources. The 674 * buffer will be stashed in the appropriate bufqueue[] allowing it 675 * to be accessed later as a cache entity or reused for other purposes. 676 */ 677void 678brelse(struct buf * bp) 679{ 680 int s; 681 682 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 683 684#if 0 685 if (bp->b_flags & B_CLUSTER) { 686 relpbuf(bp, NULL); 687 return; 688 } 689#endif 690 691 s = splbio(); 692 693 if (bp->b_flags & B_LOCKED) 694 bp->b_flags &= ~B_ERROR; 695 696 if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) { 697 bp->b_flags &= ~B_ERROR; 698 bdirty(bp); 699 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) || 700 (bp->b_bufsize <= 0)) { 701 bp->b_flags |= B_INVAL; 702 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 703 (*bioops.io_deallocate)(bp); 704 if (bp->b_flags & B_DELWRI) 705 --numdirtybuffers; 706 bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF); 707 if ((bp->b_flags & B_VMIO) == 0) { 708 if (bp->b_bufsize) 709 allocbuf(bp, 0); 710 if (bp->b_vp) 711 brelvp(bp); 712 } 713 } 714 715 /* 716 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release() 717 * is called with B_DELWRI set, the underlying pages may wind up 718 * getting freed causing a previous write (bdwrite()) to get 'lost' 719 * because pages associated with a B_DELWRI bp are marked clean. 720 * 721 * We still allow the B_INVAL case to call vfs_vmio_release(), even 722 * if B_DELWRI is set. 723 */ 724 725 if (bp->b_flags & B_DELWRI) 726 bp->b_flags &= ~B_RELBUF; 727 728 /* 729 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 730 * constituted, so the B_INVAL flag is used to *invalidate* the buffer, 731 * but the VM object is kept around. The B_NOCACHE flag is used to 732 * invalidate the pages in the VM object. 733 * 734 * The b_{validoff,validend,dirtyoff,dirtyend} values are relative 735 * to b_offset and currently have byte granularity, whereas the 736 * valid flags in the vm_pages have only DEV_BSIZE resolution. 737 * The byte resolution fields are used to avoid unnecessary re-reads 738 * of the buffer but the code really needs to be genericized so 739 * other filesystem modules can take advantage of these fields. 740 * 741 * XXX this seems to cause performance problems. 742 */ 743 if ((bp->b_flags & B_VMIO) 744 && !(bp->b_vp->v_tag == VT_NFS && 745 bp->b_vp->v_type != VBLK && 746 (bp->b_flags & B_DELWRI) != 0) 747#ifdef notdef 748 && (bp->b_vp->v_tag != VT_NFS 749 || bp->b_vp->v_type == VBLK 750 || (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) 751 || bp->b_validend == 0 752 || (bp->b_validoff == 0 753 && bp->b_validend == bp->b_bufsize)) 754#endif 755 ) { 756 757 int i, j, resid; 758 vm_page_t m; 759 off_t foff; 760 vm_pindex_t poff; 761 vm_object_t obj; 762 struct vnode *vp; 763 764 vp = bp->b_vp; 765 766 /* 767 * Get the base offset and length of the buffer. Note that 768 * for block sizes that are less then PAGE_SIZE, the b_data 769 * base of the buffer does not represent exactly b_offset and 770 * neither b_offset nor b_size are necessarily page aligned. 771 * Instead, the starting position of b_offset is: 772 * 773 * b_data + (b_offset & PAGE_MASK) 774 * 775 * block sizes less then DEV_BSIZE (usually 512) are not 776 * supported due to the page granularity bits (m->valid, 777 * m->dirty, etc...). 778 * 779 * See man buf(9) for more information 780 */ 781 782 resid = bp->b_bufsize; 783 foff = bp->b_offset; 784 785 for (i = 0; i < bp->b_npages; i++) { 786 m = bp->b_pages[i]; 787 vm_page_flag_clear(m, PG_ZERO); 788 if (m == bogus_page) { 789 790 obj = (vm_object_t) vp->v_object; 791 poff = OFF_TO_IDX(bp->b_offset); 792 793 for (j = i; j < bp->b_npages; j++) { 794 m = bp->b_pages[j]; 795 if (m == bogus_page) { 796 m = vm_page_lookup(obj, poff + j); 797#if !defined(MAX_PERF) 798 if (!m) { 799 panic("brelse: page missing\n"); 800 } 801#endif 802 bp->b_pages[j] = m; 803 } 804 } 805 806 if ((bp->b_flags & B_INVAL) == 0) { 807 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 808 } 809 } 810 if (bp->b_flags & (B_NOCACHE|B_ERROR)) { 811 int poffset = foff & PAGE_MASK; 812 int presid = resid > (PAGE_SIZE - poffset) ? 813 (PAGE_SIZE - poffset) : resid; 814 815 KASSERT(presid >= 0, ("brelse: extra page")); 816 vm_page_set_invalid(m, poffset, presid); 817 } 818 resid -= PAGE_SIZE - (foff & PAGE_MASK); 819 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 820 } 821 822 if (bp->b_flags & (B_INVAL | B_RELBUF)) 823 vfs_vmio_release(bp); 824 825 } else if (bp->b_flags & B_VMIO) { 826 827 if (bp->b_flags & (B_INVAL | B_RELBUF)) 828 vfs_vmio_release(bp); 829 830 } 831 832#if !defined(MAX_PERF) 833 if (bp->b_qindex != QUEUE_NONE) 834 panic("brelse: free buffer onto another queue???"); 835#endif 836 /* enqueue */ 837 838 /* buffers with no memory */ 839 if (bp->b_bufsize == 0) { 840 bp->b_flags |= B_INVAL; 841 bp->b_qindex = QUEUE_EMPTY; 842 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 843 LIST_REMOVE(bp, b_hash); 844 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 845 bp->b_dev = NODEV; 846 kvafreespace += bp->b_kvasize; 847 if (bp->b_kvasize) 848 kvaspacewakeup(); 849 /* buffers with junk contents */ 850 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) { 851 bp->b_flags |= B_INVAL; 852 bp->b_qindex = QUEUE_AGE; 853 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist); 854 LIST_REMOVE(bp, b_hash); 855 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 856 bp->b_dev = NODEV; 857 858 /* buffers that are locked */ 859 } else if (bp->b_flags & B_LOCKED) { 860 bp->b_qindex = QUEUE_LOCKED; 861 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 862 863 /* buffers with stale but valid contents */ 864 } else if (bp->b_flags & B_AGE) { 865 bp->b_qindex = QUEUE_AGE; 866 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist); 867 868 /* buffers with valid and quite potentially reuseable contents */ 869 } else { 870 bp->b_qindex = QUEUE_LRU; 871 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 872 } 873 874 /* 875 * If B_INVAL, clear B_DELWRI. 876 */ 877 if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) { 878 bp->b_flags &= ~B_DELWRI; 879 --numdirtybuffers; 880 } 881 882 runningbufspace -= bp->b_bufsize; 883 884 /* 885 * Fixup numfreebuffers count. The bp is on an appropriate queue 886 * unless locked. We then bump numfreebuffers if it is not B_DELWRI. 887 * We've already handled the B_INVAL case ( B_DELWRI will be clear 888 * if B_INVAL is set ). 889 */ 890 891 if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI)) 892 bufcountwakeup(); 893 894 /* 895 * Something we can maybe free. 896 */ 897 898 if (bp->b_bufsize) 899 bufspacewakeup(); 900 901 if (bp->b_flags & B_WANTED) { 902 bp->b_flags &= ~(B_WANTED | B_AGE); 903 wakeup(bp); 904 } 905 906 /* unlock */ 907 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 908 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 909 splx(s); 910} 911 912/* 913 * Release a buffer back to the appropriate queue but do not try to free 914 * it. 915 */ 916void 917bqrelse(struct buf * bp) 918{ 919 int s; 920 921 s = splbio(); 922 923 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 924 925#if !defined(MAX_PERF) 926 if (bp->b_qindex != QUEUE_NONE) 927 panic("bqrelse: free buffer onto another queue???"); 928#endif 929 if (bp->b_flags & B_LOCKED) { 930 bp->b_flags &= ~B_ERROR; 931 bp->b_qindex = QUEUE_LOCKED; 932 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 933 /* buffers with stale but valid contents */ 934 } else { 935 bp->b_qindex = QUEUE_LRU; 936 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 937 } 938 939 runningbufspace -= bp->b_bufsize; 940 941 if ((bp->b_flags & B_LOCKED) == 0 && 942 ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) 943 ) { 944 bufcountwakeup(); 945 } 946 947 /* 948 * Something we can maybe wakeup 949 */ 950 if (bp->b_bufsize) 951 bufspacewakeup(); 952 953 /* anyone need this block? */ 954 if (bp->b_flags & B_WANTED) { 955 bp->b_flags &= ~(B_WANTED | B_AGE); 956 wakeup(bp); 957 } 958 959 /* unlock */ 960 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 961 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 962 splx(s); 963} 964 965static void 966vfs_vmio_release(bp) 967 struct buf *bp; 968{ 969 int i, s; 970 vm_page_t m; 971 972 s = splvm(); 973 for (i = 0; i < bp->b_npages; i++) { 974 m = bp->b_pages[i]; 975 bp->b_pages[i] = NULL; 976 /* 977 * In order to keep page LRU ordering consistent, put 978 * everything on the inactive queue. 979 */ 980 vm_page_unwire(m, 0); 981 /* 982 * We don't mess with busy pages, it is 983 * the responsibility of the process that 984 * busied the pages to deal with them. 985 */ 986 if ((m->flags & PG_BUSY) || (m->busy != 0)) 987 continue; 988 989 if (m->wire_count == 0) { 990 vm_page_flag_clear(m, PG_ZERO); 991 /* 992 * Might as well free the page if we can and it has 993 * no valid data. 994 */ 995 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) { 996 vm_page_busy(m); 997 vm_page_protect(m, VM_PROT_NONE); 998 vm_page_free(m); 999 } 1000 } 1001 } 1002 bufspace -= bp->b_bufsize; 1003 vmiospace -= bp->b_bufsize; 1004 runningbufspace -= bp->b_bufsize; 1005 splx(s); 1006 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 1007 if (bp->b_bufsize) 1008 bufspacewakeup(); 1009 bp->b_npages = 0; 1010 bp->b_bufsize = 0; 1011 bp->b_flags &= ~B_VMIO; 1012 if (bp->b_vp) 1013 brelvp(bp); 1014} 1015 1016/* 1017 * Check to see if a block is currently memory resident. 1018 */ 1019struct buf * 1020gbincore(struct vnode * vp, daddr_t blkno) 1021{ 1022 struct buf *bp; 1023 struct bufhashhdr *bh; 1024 1025 bh = BUFHASH(vp, blkno); 1026 bp = bh->lh_first; 1027 1028 /* Search hash chain */ 1029 while (bp != NULL) { 1030 /* hit */ 1031 if (bp->b_vp == vp && bp->b_lblkno == blkno && 1032 (bp->b_flags & B_INVAL) == 0) { 1033 break; 1034 } 1035 bp = bp->b_hash.le_next; 1036 } 1037 return (bp); 1038} 1039 1040/* 1041 * this routine implements clustered async writes for 1042 * clearing out B_DELWRI buffers... This is much better 1043 * than the old way of writing only one buffer at a time. 1044 */ 1045int 1046vfs_bio_awrite(struct buf * bp) 1047{ 1048 int i; 1049 daddr_t lblkno = bp->b_lblkno; 1050 struct vnode *vp = bp->b_vp; 1051 int s; 1052 int ncl; 1053 struct buf *bpa; 1054 int nwritten; 1055 int size; 1056 int maxcl; 1057 1058 s = splbio(); 1059 /* 1060 * right now we support clustered writing only to regular files, and 1061 * then only if our I/O system is not saturated. 1062 */ 1063 if ((vp->v_type == VREG) && 1064 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1065 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1066 1067 size = vp->v_mount->mnt_stat.f_iosize; 1068 maxcl = MAXPHYS / size; 1069 1070 for (i = 1; i < maxcl; i++) { 1071 if ((bpa = gbincore(vp, lblkno + i)) && 1072 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == 1073 (B_DELWRI | B_CLUSTEROK)) && 1074 (bpa->b_bufsize == size)) { 1075 if ((bpa->b_blkno == bpa->b_lblkno) || 1076 (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 1077 break; 1078 } else { 1079 break; 1080 } 1081 } 1082 ncl = i; 1083 /* 1084 * this is a possible cluster write 1085 */ 1086 if (ncl != 1) { 1087 nwritten = cluster_wbuild(vp, size, lblkno, ncl); 1088 splx(s); 1089 return nwritten; 1090 } 1091 } 1092 1093 bremfree(bp); 1094 bp->b_flags |= B_BUSY | B_ASYNC; 1095 1096 splx(s); 1097 /* 1098 * default (old) behavior, writing out only one block 1099 */ 1100 nwritten = bp->b_bufsize; 1101 (void) VOP_BWRITE(bp); 1102 1103 return nwritten; 1104} 1105 1106/* 1107 * getnewbuf: 1108 * 1109 * Find and initialize a new buffer header, freeing up existing buffers 1110 * in the bufqueues as necessary. 1111 * 1112 * We block if: 1113 * We have insufficient buffer headers 1114 * We have insufficient buffer space 1115 * buffer_map is too fragmented ( space reservation fails ) 1116 * 1117 * We do *not* attempt to flush dirty buffers more then one level deep. 1118 * I.e., if P_FLSINPROG is set we do not flush dirty buffers at all. 1119 * 1120 * If P_FLSINPROG is set, we are allowed to dip into our emergency 1121 * reserve. 1122 */ 1123static struct buf * 1124getnewbuf(struct vnode *vp, daddr_t blkno, 1125 int slpflag, int slptimeo, int size, int maxsize) 1126{ 1127 struct buf *bp; 1128 struct buf *nbp; 1129 int outofspace; 1130 int nqindex; 1131 int defrag = 0; 1132 int countawrites = 0; 1133 1134restart: 1135 1136 /* 1137 * Setup for scan. If we do not have enough free buffers, 1138 * we setup a degenerate case that falls through the while. 1139 * 1140 * If we are in the middle of a flush, we can dip into the 1141 * emergency reserve. 1142 */ 1143 1144 if ((curproc->p_flag & P_FLSINPROG) == 0 && 1145 numfreebuffers < lofreebuffers 1146 ) { 1147 nqindex = QUEUE_LRU; 1148 nbp = NULL; 1149 } else { 1150 nqindex = QUEUE_EMPTY; 1151 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL) { 1152 nqindex = QUEUE_AGE; 1153 nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1154 if (nbp == NULL) { 1155 nqindex = QUEUE_LRU; 1156 nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1157 } 1158 } 1159 } 1160 1161 /* 1162 * Calculate whether we are out of buffer space. This state is 1163 * recalculated on every restart. If we are out of space, we 1164 * have to turn off defragmentation. The outofspace code will 1165 * defragment too, but the looping conditionals will be messed up 1166 * if both outofspace and defrag are on. 1167 */ 1168 1169 outofspace = 0; 1170 if (bufspace >= hibufspace) { 1171 if ((curproc->p_flag & P_FLSINPROG) == 0 || 1172 bufspace >= maxbufspace 1173 ) { 1174 outofspace = 1; 1175 defrag = 0; 1176 } 1177 } 1178 1179 /* 1180 * defrag state is semi-persistant. 1 means we are flagged for 1181 * defragging. -1 means we actually defragged something. 1182 */ 1183 /* nop */ 1184 1185 /* 1186 * Run scan, possibly freeing data and/or kva mappings on the fly 1187 * depending. 1188 */ 1189 1190 while ((bp = nbp) != NULL) { 1191 int qindex = nqindex; 1192 /* 1193 * Calculate next bp ( we can only use it if we do not block 1194 * or do other fancy things ). 1195 */ 1196 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 1197 switch(qindex) { 1198 case QUEUE_EMPTY: 1199 nqindex = QUEUE_AGE; 1200 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) 1201 break; 1202 /* fall through */ 1203 case QUEUE_AGE: 1204 nqindex = QUEUE_LRU; 1205 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) 1206 break; 1207 /* fall through */ 1208 case QUEUE_LRU: 1209 /* 1210 * nbp is NULL. 1211 */ 1212 break; 1213 } 1214 } 1215 1216 /* 1217 * Sanity Checks 1218 */ 1219 KASSERT(!(bp->b_flags & B_BUSY), ("getnewbuf: busy buffer %p on free list", bp)); 1220 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp)); 1221 1222 /* 1223 * Here we try to move NON VMIO buffers to the end of the 1224 * LRU queue in order to make VMIO buffers more readily 1225 * freeable. We also try to move buffers with a positive 1226 * usecount to the end. 1227 * 1228 * Note that by moving the bp to the end, we setup a following 1229 * loop. Since we continue to decrement b_usecount this 1230 * is ok and, in fact, desireable. 1231 * 1232 * If we are at the end of the list, we move ourself to the 1233 * same place and need to fixup nbp and nqindex to handle 1234 * the following case. 1235 */ 1236 1237 if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) { 1238 if ((bp->b_flags & B_VMIO) == 0 || 1239 (vmiospace < maxvmiobufspace) 1240 ) { 1241 --bp->b_usecount; 1242 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 1243 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1244 if (nbp == NULL) { 1245 nqindex = qindex; 1246 nbp = bp; 1247 } 1248 continue; 1249 } 1250 } 1251 1252 /* 1253 * If we come across a delayed write and numdirtybuffers should 1254 * be flushed, try to write it out. Only if P_FLSINPROG is 1255 * not set. We can't afford to recursively stack more then 1256 * one deep due to the possibility of having deep VFS call 1257 * stacks. 1258 * 1259 * Limit the number of dirty buffers we are willing to try 1260 * to recover since it really isn't our job here. 1261 */ 1262 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 1263 if ((curproc->p_flag & P_FLSINPROG) || 1264 numdirtybuffers < hidirtybuffers || 1265 countawrites > 16 1266 ) { 1267 continue; 1268 } 1269 curproc->p_flag |= P_FLSINPROG; 1270 vfs_bio_awrite(bp); 1271 curproc->p_flag &= ~P_FLSINPROG; 1272 ++countawrites; 1273 goto restart; 1274 } 1275 1276 if (defrag > 0 && bp->b_kvasize == 0) 1277 continue; 1278 if (outofspace > 0 && bp->b_bufsize == 0) 1279 continue; 1280 1281 /* 1282 * Start freeing the bp. This is somewhat involved. nbp 1283 * remains valid only for QUEUE_EMPTY bp's. 1284 */ 1285 1286 bremfree(bp); 1287 bp->b_flags |= B_BUSY; 1288 1289 if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) { 1290 if (bp->b_flags & B_VMIO) { 1291 bp->b_flags &= ~B_ASYNC; 1292 vfs_vmio_release(bp); 1293 } 1294 if (bp->b_vp) 1295 brelvp(bp); 1296 } 1297 1298 if (bp->b_flags & B_WANTED) { 1299 bp->b_flags &= ~B_WANTED; 1300 wakeup(bp); 1301 } 1302 1303 /* 1304 * NOTE: nbp is now entirely invalid. We can only restart 1305 * the scan from this point on. 1306 * 1307 * Get the rest of the buffer freed up. b_kva* is still 1308 * valid after this operation. 1309 */ 1310 1311 if (bp->b_rcred != NOCRED) { 1312 crfree(bp->b_rcred); 1313 bp->b_rcred = NOCRED; 1314 } 1315 if (bp->b_wcred != NOCRED) { 1316 crfree(bp->b_wcred); 1317 bp->b_wcred = NOCRED; 1318 } 1319 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 1320 (*bioops.io_deallocate)(bp); 1321 1322 LIST_REMOVE(bp, b_hash); 1323 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1324 1325 if (bp->b_bufsize) 1326 allocbuf(bp, 0); 1327 1328 bp->b_flags = B_BUSY; 1329 bp->b_dev = NODEV; 1330 bp->b_vp = NULL; 1331 bp->b_blkno = bp->b_lblkno = 0; 1332 bp->b_offset = NOOFFSET; 1333 bp->b_iodone = 0; 1334 bp->b_error = 0; 1335 bp->b_resid = 0; 1336 bp->b_bcount = 0; 1337 bp->b_npages = 0; 1338 bp->b_dirtyoff = bp->b_dirtyend = 0; 1339 bp->b_validoff = bp->b_validend = 0; 1340 bp->b_usecount = 5; 1341 1342 LIST_INIT(&bp->b_dep); 1343 1344 /* 1345 * Ok, now that we have a free buffer, if we are defragging 1346 * we have to recover the kvaspace. 1347 */ 1348 1349 if (defrag > 0) { 1350 defrag = -1; 1351 bp->b_flags |= B_INVAL; 1352 bfreekva(bp); 1353 brelse(bp); 1354 goto restart; 1355 } 1356 1357 if (outofspace > 0) { 1358 outofspace = -1; 1359 bp->b_flags |= B_INVAL; 1360 bfreekva(bp); 1361 brelse(bp); 1362 goto restart; 1363 } 1364 1365 /* 1366 * We are done 1367 */ 1368 break; 1369 } 1370 1371 /* 1372 * If we exhausted our list, sleep as appropriate. 1373 */ 1374 1375 if (bp == NULL) { 1376 int flags; 1377 1378dosleep: 1379 if (defrag > 0) 1380 flags = VFS_BIO_NEED_KVASPACE; 1381 else if (outofspace > 0) 1382 flags = VFS_BIO_NEED_BUFSPACE; 1383 else 1384 flags = VFS_BIO_NEED_ANY; 1385 1386 if (rushjob < syncdelay / 2) 1387 ++rushjob; 1388 needsbuffer |= flags; 1389 while (needsbuffer & flags) { 1390 tsleep( 1391 &needsbuffer, 1392 (PRIBIO + 4) | slpflag, 1393 "newbuf", 1394 slptimeo 1395 ); 1396 } 1397 } else { 1398 /* 1399 * We finally have a valid bp. We aren't quite out of the 1400 * woods, we still have to reserve kva space. 1401 */ 1402 vm_offset_t addr = 0; 1403 1404 maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK; 1405 1406 if (maxsize != bp->b_kvasize) { 1407 bfreekva(bp); 1408 1409 if (vm_map_findspace(buffer_map, 1410 vm_map_min(buffer_map), maxsize, &addr) 1411 ) { 1412 /* 1413 * Uh oh. Buffer map is to fragmented. Try 1414 * to defragment. 1415 */ 1416 if (defrag <= 0) { 1417 defrag = 1; 1418 bp->b_flags |= B_INVAL; 1419 brelse(bp); 1420 goto restart; 1421 } 1422 /* 1423 * Uh oh. We couldn't seem to defragment 1424 */ 1425 bp = NULL; 1426 goto dosleep; 1427 } 1428 } 1429 if (addr) { 1430 vm_map_insert(buffer_map, NULL, 0, 1431 addr, addr + maxsize, 1432 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 1433 1434 bp->b_kvabase = (caddr_t) addr; 1435 bp->b_kvasize = maxsize; 1436 } 1437 bp->b_data = bp->b_kvabase; 1438 } 1439 1440 return (bp); 1441} 1442 1443/* 1444 * waitfreebuffers: 1445 * 1446 * Wait for sufficient free buffers. This routine is not called if 1447 * curproc is the update process so we do not have to do anything 1448 * fancy. 1449 */ 1450 1451static void 1452waitfreebuffers(int slpflag, int slptimeo) 1453{ 1454 while (numfreebuffers < hifreebuffers) { 1455 flushdirtybuffers(slpflag, slptimeo); 1456 if (numfreebuffers < hifreebuffers) 1457 break; 1458 needsbuffer |= VFS_BIO_NEED_FREE; 1459 if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo)) 1460 break; 1461 } 1462} 1463 1464/* 1465 * flushdirtybuffers: 1466 * 1467 * This routine is called when we get too many dirty buffers. 1468 * 1469 * We have to protect ourselves from recursion, but we also do not want 1470 * other process's flushdirtybuffers() to interfere with the syncer if 1471 * it decides to flushdirtybuffers(). 1472 * 1473 * In order to maximize operations, we allow any process to flush 1474 * dirty buffers and use P_FLSINPROG to prevent recursion. 1475 */ 1476 1477static void 1478flushdirtybuffers(int slpflag, int slptimeo) 1479{ 1480 int s; 1481 1482 s = splbio(); 1483 1484 if (curproc->p_flag & P_FLSINPROG) { 1485 splx(s); 1486 return; 1487 } 1488 curproc->p_flag |= P_FLSINPROG; 1489 1490 while (numdirtybuffers > lodirtybuffers) { 1491 if (flushbufqueues() == 0) 1492 break; 1493 } 1494 1495 curproc->p_flag &= ~P_FLSINPROG; 1496 1497 splx(s); 1498} 1499 1500static int 1501flushbufqueues(void) 1502{ 1503 struct buf *bp; 1504 int qindex; 1505 int r = 0; 1506 1507 qindex = QUEUE_AGE; 1508 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1509 1510 for (;;) { 1511 if (bp == NULL) { 1512 if (qindex == QUEUE_LRU) 1513 break; 1514 qindex = QUEUE_LRU; 1515 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL) 1516 break; 1517 } 1518 1519 /* 1520 * XXX NFS does weird things with B_INVAL bps if we bwrite 1521 * them ( vfs_bio_awrite/bawrite/bdwrite/etc ) Why? 1522 * 1523 */ 1524 if ((bp->b_flags & B_DELWRI) != 0) { 1525 if (bp->b_flags & B_INVAL) { 1526 bremfree(bp); 1527 bp->b_flags |= B_BUSY; 1528 brelse(bp); 1529 } else { 1530 vfs_bio_awrite(bp); 1531 } 1532 ++r; 1533 break; 1534 } 1535 bp = TAILQ_NEXT(bp, b_freelist); 1536 } 1537 return(r); 1538} 1539 1540/* 1541 * Check to see if a block is currently memory resident. 1542 */ 1543struct buf * 1544incore(struct vnode * vp, daddr_t blkno) 1545{ 1546 struct buf *bp; 1547 1548 int s = splbio(); 1549 bp = gbincore(vp, blkno); 1550 splx(s); 1551 return (bp); 1552} 1553 1554/* 1555 * Returns true if no I/O is needed to access the 1556 * associated VM object. This is like incore except 1557 * it also hunts around in the VM system for the data. 1558 */ 1559 1560int 1561inmem(struct vnode * vp, daddr_t blkno) 1562{ 1563 vm_object_t obj; 1564 vm_offset_t toff, tinc, size; 1565 vm_page_t m; 1566 vm_ooffset_t off; 1567 1568 if (incore(vp, blkno)) 1569 return 1; 1570 if (vp->v_mount == NULL) 1571 return 0; 1572 if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0) 1573 return 0; 1574 1575 obj = vp->v_object; 1576 size = PAGE_SIZE; 1577 if (size > vp->v_mount->mnt_stat.f_iosize) 1578 size = vp->v_mount->mnt_stat.f_iosize; 1579 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 1580 1581 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 1582 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 1583 if (!m) 1584 return 0; 1585 tinc = size; 1586 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 1587 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 1588 if (vm_page_is_valid(m, 1589 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 1590 return 0; 1591 } 1592 return 1; 1593} 1594 1595/* 1596 * now we set the dirty range for the buffer -- 1597 * for NFS -- if the file is mapped and pages have 1598 * been written to, let it know. We want the 1599 * entire range of the buffer to be marked dirty if 1600 * any of the pages have been written to for consistancy 1601 * with the b_validoff, b_validend set in the nfs write 1602 * code, and used by the nfs read code. 1603 */ 1604static void 1605vfs_setdirty(struct buf *bp) 1606{ 1607 int i; 1608 vm_object_t object; 1609 vm_offset_t boffset; 1610 1611 /* 1612 * We qualify the scan for modified pages on whether the 1613 * object has been flushed yet. The OBJ_WRITEABLE flag 1614 * is not cleared simply by protecting pages off. 1615 */ 1616 1617 if ((bp->b_flags & B_VMIO) == 0) 1618 return; 1619 1620 object = bp->b_pages[0]->object; 1621 1622 if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY)) 1623 printf("Warning: object %p writeable but not mightbedirty\n", object); 1624 if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY)) 1625 printf("Warning: object %p mightbedirty but not writeable\n", object); 1626 1627 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) { 1628 /* 1629 * test the pages to see if they have been modified directly 1630 * by users through the VM system. 1631 */ 1632 for (i = 0; i < bp->b_npages; i++) { 1633 vm_page_flag_clear(bp->b_pages[i], PG_ZERO); 1634 vm_page_test_dirty(bp->b_pages[i]); 1635 } 1636 1637 /* 1638 * scan forwards for the first page modified 1639 */ 1640 for (i = 0; i < bp->b_npages; i++) { 1641 if (bp->b_pages[i]->dirty) { 1642 break; 1643 } 1644 } 1645 1646 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 1647 if (boffset < bp->b_dirtyoff) { 1648 bp->b_dirtyoff = max(boffset, 0); 1649 } 1650 1651 /* 1652 * scan backwards for the last page modified 1653 */ 1654 for (i = bp->b_npages - 1; i >= 0; --i) { 1655 if (bp->b_pages[i]->dirty) { 1656 break; 1657 } 1658 } 1659 boffset = (i + 1); 1660#if 0 1661 offset = boffset + bp->b_pages[0]->pindex; 1662 if (offset >= object->size) 1663 boffset = object->size - bp->b_pages[0]->pindex; 1664#endif 1665 boffset = (boffset << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 1666 if (bp->b_dirtyend < boffset) 1667 bp->b_dirtyend = min(boffset, bp->b_bufsize); 1668 } 1669} 1670 1671/* 1672 * Get a block given a specified block and offset into a file/device. 1673 */ 1674struct buf * 1675getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1676{ 1677 struct buf *bp; 1678 int i, s; 1679 struct bufhashhdr *bh; 1680 1681#if !defined(MAX_PERF) 1682 if (size > MAXBSIZE) 1683 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 1684#endif 1685 1686 s = splbio(); 1687loop: 1688 /* 1689 * Block if we are low on buffers. The syncer is allowed more 1690 * buffers in order to avoid a deadlock. 1691 */ 1692 if (curproc == updateproc && numfreebuffers == 0) { 1693 needsbuffer |= VFS_BIO_NEED_ANY; 1694 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf", 1695 slptimeo); 1696 } else if (curproc != updateproc && numfreebuffers < lofreebuffers) { 1697 waitfreebuffers(slpflag, slptimeo); 1698 } 1699 1700 if ((bp = gbincore(vp, blkno))) { 1701 if (bp->b_flags & B_BUSY) { 1702 bp->b_flags |= B_WANTED; 1703 if (bp->b_usecount < BUF_MAXUSE) 1704 ++bp->b_usecount; 1705 1706 if (!tsleep(bp, 1707 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) { 1708 goto loop; 1709 } 1710 1711 splx(s); 1712 return (struct buf *) NULL; 1713 } 1714 bp->b_flags |= B_BUSY | B_CACHE; 1715 bremfree(bp); 1716 1717 /* 1718 * check for size inconsistancies for non-VMIO case. 1719 */ 1720 1721 if (bp->b_bcount != size) { 1722 if ((bp->b_flags & B_VMIO) == 0 || 1723 (size > bp->b_kvasize) 1724 ) { 1725 if (bp->b_flags & B_DELWRI) { 1726 bp->b_flags |= B_NOCACHE; 1727 VOP_BWRITE(bp); 1728 } else { 1729 if ((bp->b_flags & B_VMIO) && 1730 (LIST_FIRST(&bp->b_dep) == NULL)) { 1731 bp->b_flags |= B_RELBUF; 1732 brelse(bp); 1733 } else { 1734 bp->b_flags |= B_NOCACHE; 1735 VOP_BWRITE(bp); 1736 } 1737 } 1738 goto loop; 1739 } 1740 } 1741 1742 /* 1743 * If the size is inconsistant in the VMIO case, we can resize 1744 * the buffer. This might lead to B_CACHE getting cleared. 1745 */ 1746 1747 if (bp->b_bcount != size) 1748 allocbuf(bp, size); 1749 1750 KASSERT(bp->b_offset != NOOFFSET, 1751 ("getblk: no buffer offset")); 1752 1753 /* 1754 * Check that the constituted buffer really deserves for the 1755 * B_CACHE bit to be set. B_VMIO type buffers might not 1756 * contain fully valid pages. Normal (old-style) buffers 1757 * should be fully valid. This might also lead to B_CACHE 1758 * getting clear. 1759 * 1760 * If B_CACHE is already clear, don't bother checking to see 1761 * if we have to clear it again. 1762 * 1763 * XXX this code should not be necessary unless the B_CACHE 1764 * handling is broken elsewhere in the kernel. We need to 1765 * check the cases and then turn the clearing part of this 1766 * code into a panic. 1767 */ 1768 if ( 1769 (bp->b_flags & (B_VMIO|B_CACHE)) == (B_VMIO|B_CACHE) && 1770 (bp->b_vp->v_tag != VT_NFS || bp->b_validend <= 0) 1771 ) { 1772 int checksize = bp->b_bufsize; 1773 int poffset = bp->b_offset & PAGE_MASK; 1774 int resid; 1775 for (i = 0; i < bp->b_npages; i++) { 1776 resid = (checksize > (PAGE_SIZE - poffset)) ? 1777 (PAGE_SIZE - poffset) : checksize; 1778 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) { 1779 bp->b_flags &= ~(B_CACHE | B_DONE); 1780 break; 1781 } 1782 checksize -= resid; 1783 poffset = 0; 1784 } 1785 } 1786 1787 /* 1788 * If B_DELWRI is set and B_CACHE got cleared ( or was 1789 * already clear ), we have to commit the write and 1790 * retry. The NFS code absolutely depends on this, 1791 * and so might the FFS code. In anycase, it formalizes 1792 * the B_CACHE rules. See sys/buf.h. 1793 */ 1794 1795 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 1796 VOP_BWRITE(bp); 1797 goto loop; 1798 } 1799 1800 if (bp->b_usecount < BUF_MAXUSE) 1801 ++bp->b_usecount; 1802 splx(s); 1803 return (bp); 1804 } else { 1805 int bsize, maxsize, vmio; 1806 off_t offset; 1807 1808 if (vp->v_type == VBLK) 1809 bsize = DEV_BSIZE; 1810 else if (vp->v_mountedhere) 1811 bsize = vp->v_mountedhere->mnt_stat.f_iosize; 1812 else if (vp->v_mount) 1813 bsize = vp->v_mount->mnt_stat.f_iosize; 1814 else 1815 bsize = size; 1816 1817 offset = (off_t)blkno * bsize; 1818 vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF); 1819 maxsize = vmio ? size + (offset & PAGE_MASK) : size; 1820 maxsize = imax(maxsize, bsize); 1821 1822 if ((bp = getnewbuf(vp, blkno, 1823 slpflag, slptimeo, size, maxsize)) == 0) { 1824 if (slpflag || slptimeo) { 1825 splx(s); 1826 return NULL; 1827 } 1828 goto loop; 1829 } 1830 1831 /* 1832 * This code is used to make sure that a buffer is not 1833 * created while the getnewbuf routine is blocked. 1834 * This can be a problem whether the vnode is locked or not. 1835 */ 1836 if (gbincore(vp, blkno)) { 1837 bp->b_flags |= B_INVAL; 1838 brelse(bp); 1839 goto loop; 1840 } 1841 1842 /* 1843 * Insert the buffer into the hash, so that it can 1844 * be found by incore. 1845 */ 1846 bp->b_blkno = bp->b_lblkno = blkno; 1847 bp->b_offset = offset; 1848 1849 bgetvp(vp, bp); 1850 LIST_REMOVE(bp, b_hash); 1851 bh = BUFHASH(vp, blkno); 1852 LIST_INSERT_HEAD(bh, bp, b_hash); 1853 1854 if (vmio) { 1855 bp->b_flags |= (B_VMIO | B_CACHE); 1856#if defined(VFS_BIO_DEBUG) 1857 if (vp->v_type != VREG && vp->v_type != VBLK) 1858 printf("getblk: vmioing file type %d???\n", vp->v_type); 1859#endif 1860 } else { 1861 bp->b_flags &= ~B_VMIO; 1862 } 1863 1864 allocbuf(bp, size); 1865 1866 splx(s); 1867 return (bp); 1868 } 1869} 1870 1871/* 1872 * Get an empty, disassociated buffer of given size. 1873 */ 1874struct buf * 1875geteblk(int size) 1876{ 1877 struct buf *bp; 1878 int s; 1879 1880 s = splbio(); 1881 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0); 1882 splx(s); 1883 allocbuf(bp, size); 1884 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 1885 return (bp); 1886} 1887 1888 1889/* 1890 * This code constitutes the buffer memory from either anonymous system 1891 * memory (in the case of non-VMIO operations) or from an associated 1892 * VM object (in the case of VMIO operations). This code is able to 1893 * resize a buffer up or down. 1894 * 1895 * Note that this code is tricky, and has many complications to resolve 1896 * deadlock or inconsistant data situations. Tread lightly!!! 1897 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 1898 * the caller. Calling this code willy nilly can result in the loss of data. 1899 */ 1900 1901int 1902allocbuf(struct buf *bp, int size) 1903{ 1904 int newbsize, mbsize; 1905 int i; 1906 1907#if !defined(MAX_PERF) 1908 if (!(bp->b_flags & B_BUSY)) 1909 panic("allocbuf: buffer not busy"); 1910 1911 if (bp->b_kvasize < size) 1912 panic("allocbuf: buffer too small"); 1913#endif 1914 1915 if ((bp->b_flags & B_VMIO) == 0) { 1916 caddr_t origbuf; 1917 int origbufsize; 1918 /* 1919 * Just get anonymous memory from the kernel 1920 */ 1921 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1922#if !defined(NO_B_MALLOC) 1923 if (bp->b_flags & B_MALLOC) 1924 newbsize = mbsize; 1925 else 1926#endif 1927 newbsize = round_page(size); 1928 1929 if (newbsize < bp->b_bufsize) { 1930#if !defined(NO_B_MALLOC) 1931 /* 1932 * malloced buffers are not shrunk 1933 */ 1934 if (bp->b_flags & B_MALLOC) { 1935 if (newbsize) { 1936 bp->b_bcount = size; 1937 } else { 1938 free(bp->b_data, M_BIOBUF); 1939 bufspace -= bp->b_bufsize; 1940 bufmallocspace -= bp->b_bufsize; 1941 runningbufspace -= bp->b_bufsize; 1942 if (bp->b_bufsize) 1943 bufspacewakeup(); 1944 bp->b_data = bp->b_kvabase; 1945 bp->b_bufsize = 0; 1946 bp->b_bcount = 0; 1947 bp->b_flags &= ~B_MALLOC; 1948 } 1949 return 1; 1950 } 1951#endif 1952 vm_hold_free_pages( 1953 bp, 1954 (vm_offset_t) bp->b_data + newbsize, 1955 (vm_offset_t) bp->b_data + bp->b_bufsize); 1956 } else if (newbsize > bp->b_bufsize) { 1957#if !defined(NO_B_MALLOC) 1958 /* 1959 * We only use malloced memory on the first allocation. 1960 * and revert to page-allocated memory when the buffer grows. 1961 */ 1962 if ( (bufmallocspace < maxbufmallocspace) && 1963 (bp->b_bufsize == 0) && 1964 (mbsize <= PAGE_SIZE/2)) { 1965 1966 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 1967 bp->b_bufsize = mbsize; 1968 bp->b_bcount = size; 1969 bp->b_flags |= B_MALLOC; 1970 bufspace += mbsize; 1971 bufmallocspace += mbsize; 1972 runningbufspace += bp->b_bufsize; 1973 return 1; 1974 } 1975#endif 1976 origbuf = NULL; 1977 origbufsize = 0; 1978#if !defined(NO_B_MALLOC) 1979 /* 1980 * If the buffer is growing on its other-than-first allocation, 1981 * then we revert to the page-allocation scheme. 1982 */ 1983 if (bp->b_flags & B_MALLOC) { 1984 origbuf = bp->b_data; 1985 origbufsize = bp->b_bufsize; 1986 bp->b_data = bp->b_kvabase; 1987 bufspace -= bp->b_bufsize; 1988 bufmallocspace -= bp->b_bufsize; 1989 runningbufspace -= bp->b_bufsize; 1990 if (bp->b_bufsize) 1991 bufspacewakeup(); 1992 bp->b_bufsize = 0; 1993 bp->b_flags &= ~B_MALLOC; 1994 newbsize = round_page(newbsize); 1995 } 1996#endif 1997 vm_hold_load_pages( 1998 bp, 1999 (vm_offset_t) bp->b_data + bp->b_bufsize, 2000 (vm_offset_t) bp->b_data + newbsize); 2001#if !defined(NO_B_MALLOC) 2002 if (origbuf) { 2003 bcopy(origbuf, bp->b_data, origbufsize); 2004 free(origbuf, M_BIOBUF); 2005 } 2006#endif 2007 } 2008 } else { 2009 vm_page_t m; 2010 int desiredpages; 2011 2012 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2013 desiredpages = (size == 0) ? 0 : 2014 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 2015 2016#if !defined(NO_B_MALLOC) 2017 if (bp->b_flags & B_MALLOC) 2018 panic("allocbuf: VMIO buffer can't be malloced"); 2019#endif 2020 2021 if (newbsize < bp->b_bufsize) { 2022 if (desiredpages < bp->b_npages) { 2023 for (i = desiredpages; i < bp->b_npages; i++) { 2024 /* 2025 * the page is not freed here -- it 2026 * is the responsibility of vnode_pager_setsize 2027 */ 2028 m = bp->b_pages[i]; 2029 KASSERT(m != bogus_page, 2030 ("allocbuf: bogus page found")); 2031 while (vm_page_sleep_busy(m, TRUE, "biodep")) 2032 ; 2033 2034 bp->b_pages[i] = NULL; 2035 vm_page_unwire(m, 0); 2036 } 2037 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 2038 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 2039 bp->b_npages = desiredpages; 2040 } 2041 } else if (newbsize > bp->b_bufsize) { 2042 vm_object_t obj; 2043 vm_offset_t tinc, toff; 2044 vm_ooffset_t off; 2045 vm_pindex_t objoff; 2046 int pageindex, curbpnpages; 2047 struct vnode *vp; 2048 int bsize; 2049 int orig_validoff = bp->b_validoff; 2050 int orig_validend = bp->b_validend; 2051 2052 vp = bp->b_vp; 2053 2054 if (vp->v_type == VBLK) 2055 bsize = DEV_BSIZE; 2056 else 2057 bsize = vp->v_mount->mnt_stat.f_iosize; 2058 2059 if (bp->b_npages < desiredpages) { 2060 obj = vp->v_object; 2061 tinc = PAGE_SIZE; 2062 2063 off = bp->b_offset; 2064 KASSERT(bp->b_offset != NOOFFSET, 2065 ("allocbuf: no buffer offset")); 2066 curbpnpages = bp->b_npages; 2067 doretry: 2068 bp->b_validoff = orig_validoff; 2069 bp->b_validend = orig_validend; 2070 bp->b_flags |= B_CACHE; 2071 for (toff = 0; toff < newbsize; toff += tinc) { 2072 objoff = OFF_TO_IDX(off + toff); 2073 pageindex = objoff - OFF_TO_IDX(off); 2074 tinc = PAGE_SIZE - ((off + toff) & PAGE_MASK); 2075 if (pageindex < curbpnpages) { 2076 2077 m = bp->b_pages[pageindex]; 2078#ifdef VFS_BIO_DIAG 2079 if (m->pindex != objoff) 2080 panic("allocbuf: page changed offset?!!!?"); 2081#endif 2082 if (tinc > (newbsize - toff)) 2083 tinc = newbsize - toff; 2084 if (bp->b_flags & B_CACHE) 2085 vfs_buf_set_valid(bp, off, toff, tinc, m); 2086 continue; 2087 } 2088 m = vm_page_lookup(obj, objoff); 2089 if (!m) { 2090 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 2091 if (!m) { 2092 VM_WAIT; 2093 vm_pageout_deficit += (desiredpages - curbpnpages); 2094 goto doretry; 2095 } 2096 2097 vm_page_wire(m); 2098 vm_page_wakeup(m); 2099 bp->b_flags &= ~B_CACHE; 2100 2101 } else if (vm_page_sleep_busy(m, FALSE, "pgtblk")) { 2102 /* 2103 * If we had to sleep, retry. 2104 * 2105 * Also note that we only test 2106 * PG_BUSY here, not m->busy. 2107 * 2108 * We cannot sleep on m->busy 2109 * here because a vm_fault -> 2110 * getpages -> cluster-read -> 2111 * ...-> allocbuf sequence 2112 * will convert PG_BUSY to 2113 * m->busy so we have to let 2114 * m->busy through if we do 2115 * not want to deadlock. 2116 */ 2117 goto doretry; 2118 } else { 2119 if ((curproc != pageproc) && 2120 ((m->queue - m->pc) == PQ_CACHE) && 2121 ((cnt.v_free_count + cnt.v_cache_count) < 2122 (cnt.v_free_min + cnt.v_cache_min))) { 2123 pagedaemon_wakeup(); 2124 } 2125 if (tinc > (newbsize - toff)) 2126 tinc = newbsize - toff; 2127 if (bp->b_flags & B_CACHE) 2128 vfs_buf_set_valid(bp, off, toff, tinc, m); 2129 vm_page_flag_clear(m, PG_ZERO); 2130 vm_page_wire(m); 2131 } 2132 bp->b_pages[pageindex] = m; 2133 curbpnpages = pageindex + 1; 2134 } 2135 if (vp->v_tag == VT_NFS && 2136 vp->v_type != VBLK) { 2137 if (bp->b_dirtyend > 0) { 2138 bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff); 2139 bp->b_validend = max(bp->b_validend, bp->b_dirtyend); 2140 } 2141 if (bp->b_validend == 0) 2142 bp->b_flags &= ~B_CACHE; 2143 } 2144 bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data); 2145 bp->b_npages = curbpnpages; 2146 pmap_qenter((vm_offset_t) bp->b_data, 2147 bp->b_pages, bp->b_npages); 2148 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK; 2149 } 2150 } 2151 } 2152 if (bp->b_flags & B_VMIO) 2153 vmiospace += (newbsize - bp->b_bufsize); 2154 bufspace += (newbsize - bp->b_bufsize); 2155 runningbufspace += (newbsize - bp->b_bufsize); 2156 if (newbsize < bp->b_bufsize) 2157 bufspacewakeup(); 2158 bp->b_bufsize = newbsize; 2159 bp->b_bcount = size; 2160 return 1; 2161} 2162 2163/* 2164 * Wait for buffer I/O completion, returning error status. 2165 */ 2166int 2167biowait(register struct buf * bp) 2168{ 2169 int s; 2170 2171 s = splbio(); 2172 while ((bp->b_flags & B_DONE) == 0) 2173#if defined(NO_SCHEDULE_MODS) 2174 tsleep(bp, PRIBIO, "biowait", 0); 2175#else 2176 if (bp->b_flags & B_READ) 2177 tsleep(bp, PRIBIO, "biord", 0); 2178 else 2179 tsleep(bp, PRIBIO, "biowr", 0); 2180#endif 2181 splx(s); 2182 if (bp->b_flags & B_EINTR) { 2183 bp->b_flags &= ~B_EINTR; 2184 return (EINTR); 2185 } 2186 if (bp->b_flags & B_ERROR) { 2187 return (bp->b_error ? bp->b_error : EIO); 2188 } else { 2189 return (0); 2190 } 2191} 2192 2193/* 2194 * Finish I/O on a buffer, calling an optional function. 2195 * This is usually called from interrupt level, so process blocking 2196 * is not *a good idea*. 2197 */ 2198void 2199biodone(register struct buf * bp) 2200{ 2201 int s; 2202 2203 s = splbio(); 2204 2205 KASSERT((bp->b_flags & B_BUSY), ("biodone: bp %p not busy", bp)); 2206 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 2207 2208 bp->b_flags |= B_DONE; 2209 2210 if (bp->b_flags & B_FREEBUF) { 2211 brelse(bp); 2212 splx(s); 2213 return; 2214 } 2215 2216 if ((bp->b_flags & B_READ) == 0) { 2217 vwakeup(bp); 2218 } 2219 2220 /* call optional completion function if requested */ 2221 if (bp->b_flags & B_CALL) { 2222 bp->b_flags &= ~B_CALL; 2223 (*bp->b_iodone) (bp); 2224 splx(s); 2225 return; 2226 } 2227 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 2228 (*bioops.io_complete)(bp); 2229 2230 if (bp->b_flags & B_VMIO) { 2231 int i, resid; 2232 vm_ooffset_t foff; 2233 vm_page_t m; 2234 vm_object_t obj; 2235 int iosize; 2236 struct vnode *vp = bp->b_vp; 2237 2238 obj = vp->v_object; 2239 2240#if defined(VFS_BIO_DEBUG) 2241 if (vp->v_usecount == 0) { 2242 panic("biodone: zero vnode ref count"); 2243 } 2244 2245 if (vp->v_object == NULL) { 2246 panic("biodone: missing VM object"); 2247 } 2248 2249 if ((vp->v_flag & VOBJBUF) == 0) { 2250 panic("biodone: vnode is not setup for merged cache"); 2251 } 2252#endif 2253 2254 foff = bp->b_offset; 2255 KASSERT(bp->b_offset != NOOFFSET, 2256 ("biodone: no buffer offset")); 2257 2258#if !defined(MAX_PERF) 2259 if (!obj) { 2260 panic("biodone: no object"); 2261 } 2262#endif 2263#if defined(VFS_BIO_DEBUG) 2264 if (obj->paging_in_progress < bp->b_npages) { 2265 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 2266 obj->paging_in_progress, bp->b_npages); 2267 } 2268#endif 2269 iosize = bp->b_bufsize; 2270 for (i = 0; i < bp->b_npages; i++) { 2271 int bogusflag = 0; 2272 m = bp->b_pages[i]; 2273 if (m == bogus_page) { 2274 bogusflag = 1; 2275 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 2276 if (!m) { 2277#if defined(VFS_BIO_DEBUG) 2278 printf("biodone: page disappeared\n"); 2279#endif 2280 vm_object_pip_subtract(obj, 1); 2281 continue; 2282 } 2283 bp->b_pages[i] = m; 2284 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2285 } 2286#if defined(VFS_BIO_DEBUG) 2287 if (OFF_TO_IDX(foff) != m->pindex) { 2288 printf( 2289"biodone: foff(%lu)/m->pindex(%d) mismatch\n", 2290 (unsigned long)foff, m->pindex); 2291 } 2292#endif 2293 resid = IDX_TO_OFF(m->pindex + 1) - foff; 2294 if (resid > iosize) 2295 resid = iosize; 2296 2297 /* 2298 * In the write case, the valid and clean bits are 2299 * already changed correctly, so we only need to do this 2300 * here in the read case. 2301 */ 2302 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 2303 vfs_page_set_valid(bp, foff, i, m); 2304 } 2305 vm_page_flag_clear(m, PG_ZERO); 2306 2307 /* 2308 * when debugging new filesystems or buffer I/O methods, this 2309 * is the most common error that pops up. if you see this, you 2310 * have not set the page busy flag correctly!!! 2311 */ 2312 if (m->busy == 0) { 2313#if !defined(MAX_PERF) 2314 printf("biodone: page busy < 0, " 2315 "pindex: %d, foff: 0x(%x,%x), " 2316 "resid: %d, index: %d\n", 2317 (int) m->pindex, (int)(foff >> 32), 2318 (int) foff & 0xffffffff, resid, i); 2319#endif 2320 if (vp->v_type != VBLK) 2321#if !defined(MAX_PERF) 2322 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 2323 bp->b_vp->v_mount->mnt_stat.f_iosize, 2324 (int) bp->b_lblkno, 2325 bp->b_flags, bp->b_npages); 2326 else 2327 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 2328 (int) bp->b_lblkno, 2329 bp->b_flags, bp->b_npages); 2330 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 2331 m->valid, m->dirty, m->wire_count); 2332#endif 2333 panic("biodone: page busy < 0\n"); 2334 } 2335 vm_page_io_finish(m); 2336 vm_object_pip_subtract(obj, 1); 2337 foff += resid; 2338 iosize -= resid; 2339 } 2340 if (obj) 2341 vm_object_pip_wakeupn(obj, 0); 2342 } 2343 /* 2344 * For asynchronous completions, release the buffer now. The brelse 2345 * checks for B_WANTED and will do the wakeup there if necessary - so 2346 * no need to do a wakeup here in the async case. 2347 */ 2348 2349 if (bp->b_flags & B_ASYNC) { 2350 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 2351 brelse(bp); 2352 else 2353 bqrelse(bp); 2354 } else { 2355 bp->b_flags &= ~B_WANTED; 2356 wakeup(bp); 2357 } 2358 splx(s); 2359} 2360 2361#if 0 /* not with kirks code */ 2362static int vfs_update_interval = 30; 2363 2364static void 2365vfs_update() 2366{ 2367 while (1) { 2368 tsleep(&vfs_update_wakeup, PUSER, "update", 2369 hz * vfs_update_interval); 2370 vfs_update_wakeup = 0; 2371 sync(curproc, NULL); 2372 } 2373} 2374 2375static int 2376sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS 2377{ 2378 int error = sysctl_handle_int(oidp, 2379 oidp->oid_arg1, oidp->oid_arg2, req); 2380 if (!error) 2381 wakeup(&vfs_update_wakeup); 2382 return error; 2383} 2384 2385SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW, 2386 &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", ""); 2387 2388#endif 2389 2390 2391/* 2392 * This routine is called in lieu of iodone in the case of 2393 * incomplete I/O. This keeps the busy status for pages 2394 * consistant. 2395 */ 2396void 2397vfs_unbusy_pages(struct buf * bp) 2398{ 2399 int i; 2400 2401 if (bp->b_flags & B_VMIO) { 2402 struct vnode *vp = bp->b_vp; 2403 vm_object_t obj = vp->v_object; 2404 2405 for (i = 0; i < bp->b_npages; i++) { 2406 vm_page_t m = bp->b_pages[i]; 2407 2408 if (m == bogus_page) { 2409 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 2410#if !defined(MAX_PERF) 2411 if (!m) { 2412 panic("vfs_unbusy_pages: page missing\n"); 2413 } 2414#endif 2415 bp->b_pages[i] = m; 2416 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2417 } 2418 vm_object_pip_subtract(obj, 1); 2419 vm_page_flag_clear(m, PG_ZERO); 2420 vm_page_io_finish(m); 2421 } 2422 vm_object_pip_wakeupn(obj, 0); 2423 } 2424} 2425 2426/* 2427 * Set NFS' b_validoff and b_validend fields from the valid bits 2428 * of a page. If the consumer is not NFS, and the page is not 2429 * valid for the entire range, clear the B_CACHE flag to force 2430 * the consumer to re-read the page. 2431 * 2432 * B_CACHE interaction is especially tricky. 2433 */ 2434static void 2435vfs_buf_set_valid(struct buf *bp, 2436 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 2437 vm_page_t m) 2438{ 2439 if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) { 2440 vm_offset_t svalid, evalid; 2441 int validbits = m->valid >> (((foff+off)&PAGE_MASK)/DEV_BSIZE); 2442 2443 /* 2444 * This only bothers with the first valid range in the 2445 * page. 2446 */ 2447 svalid = off; 2448 while (validbits && !(validbits & 1)) { 2449 svalid += DEV_BSIZE; 2450 validbits >>= 1; 2451 } 2452 evalid = svalid; 2453 while (validbits & 1) { 2454 evalid += DEV_BSIZE; 2455 validbits >>= 1; 2456 } 2457 evalid = min(evalid, off + size); 2458 /* 2459 * We can only set b_validoff/end if this range is contiguous 2460 * with the range built up already. If we cannot set 2461 * b_validoff/end, we must clear B_CACHE to force an update 2462 * to clean the bp up. 2463 */ 2464 if (svalid == bp->b_validend) { 2465 bp->b_validoff = min(bp->b_validoff, svalid); 2466 bp->b_validend = max(bp->b_validend, evalid); 2467 } else { 2468 bp->b_flags &= ~B_CACHE; 2469 } 2470 } else if (!vm_page_is_valid(m, 2471 (vm_offset_t) ((foff + off) & PAGE_MASK), 2472 size)) { 2473 bp->b_flags &= ~B_CACHE; 2474 } 2475} 2476 2477/* 2478 * Set the valid bits in a page, taking care of the b_validoff, 2479 * b_validend fields which NFS uses to optimise small reads. Off is 2480 * the offset within the file and pageno is the page index within the buf. 2481 * 2482 * XXX we have to set the valid & clean bits for all page fragments 2483 * touched by b_validoff/validend, even if the page fragment goes somewhat 2484 * beyond b_validoff/validend due to alignment. 2485 */ 2486static void 2487vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2488{ 2489 struct vnode *vp = bp->b_vp; 2490 vm_ooffset_t soff, eoff; 2491 2492 /* 2493 * Start and end offsets in buffer. eoff - soff may not cross a 2494 * page boundry or cross the end of the buffer. 2495 */ 2496 soff = off; 2497 eoff = (off + PAGE_SIZE) & ~PAGE_MASK; 2498 if (eoff > bp->b_offset + bp->b_bufsize) 2499 eoff = bp->b_offset + bp->b_bufsize; 2500 2501 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) { 2502 vm_ooffset_t sv, ev; 2503 vm_page_set_invalid(m, 2504 (vm_offset_t) (soff & PAGE_MASK), 2505 (vm_offset_t) (eoff - soff)); 2506 /* 2507 * bp->b_validoff and bp->b_validend restrict the valid range 2508 * that we can set. Note that these offsets are not DEV_BSIZE 2509 * aligned. vm_page_set_validclean() must know what 2510 * sub-DEV_BSIZE ranges to clear. 2511 */ 2512#if 0 2513 sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2514 ev = (bp->b_offset + bp->b_validend + (DEV_BSIZE - 1)) & 2515 ~(DEV_BSIZE - 1); 2516#endif 2517 sv = bp->b_offset + bp->b_validoff; 2518 ev = bp->b_offset + bp->b_validend; 2519 soff = qmax(sv, soff); 2520 eoff = qmin(ev, eoff); 2521 } 2522 2523 if (eoff > soff) 2524 vm_page_set_validclean(m, 2525 (vm_offset_t) (soff & PAGE_MASK), 2526 (vm_offset_t) (eoff - soff)); 2527} 2528 2529/* 2530 * This routine is called before a device strategy routine. 2531 * It is used to tell the VM system that paging I/O is in 2532 * progress, and treat the pages associated with the buffer 2533 * almost as being PG_BUSY. Also the object paging_in_progress 2534 * flag is handled to make sure that the object doesn't become 2535 * inconsistant. 2536 */ 2537void 2538vfs_busy_pages(struct buf * bp, int clear_modify) 2539{ 2540 int i, bogus; 2541 2542 if (bp->b_flags & B_VMIO) { 2543 struct vnode *vp = bp->b_vp; 2544 vm_object_t obj = vp->v_object; 2545 vm_ooffset_t foff; 2546 2547 foff = bp->b_offset; 2548 KASSERT(bp->b_offset != NOOFFSET, 2549 ("vfs_busy_pages: no buffer offset")); 2550 vfs_setdirty(bp); 2551 2552retry: 2553 for (i = 0; i < bp->b_npages; i++) { 2554 vm_page_t m = bp->b_pages[i]; 2555 if (vm_page_sleep_busy(m, FALSE, "vbpage")) 2556 goto retry; 2557 } 2558 2559 bogus = 0; 2560 for (i = 0; i < bp->b_npages; i++) { 2561 vm_page_t m = bp->b_pages[i]; 2562 2563 vm_page_flag_clear(m, PG_ZERO); 2564 if ((bp->b_flags & B_CLUSTER) == 0) { 2565 vm_object_pip_add(obj, 1); 2566 vm_page_io_start(m); 2567 } 2568 2569 vm_page_protect(m, VM_PROT_NONE); 2570 if (clear_modify) 2571 vfs_page_set_valid(bp, foff, i, m); 2572 else if (m->valid == VM_PAGE_BITS_ALL && 2573 (bp->b_flags & B_CACHE) == 0) { 2574 bp->b_pages[i] = bogus_page; 2575 bogus++; 2576 } 2577 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 2578 } 2579 if (bogus) 2580 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2581 } 2582} 2583 2584/* 2585 * Tell the VM system that the pages associated with this buffer 2586 * are clean. This is used for delayed writes where the data is 2587 * going to go to disk eventually without additional VM intevention. 2588 */ 2589void 2590vfs_clean_pages(struct buf * bp) 2591{ 2592 int i; 2593 2594 if (bp->b_flags & B_VMIO) { 2595 vm_ooffset_t foff; 2596 foff = bp->b_offset; 2597 KASSERT(bp->b_offset != NOOFFSET, 2598 ("vfs_clean_pages: no buffer offset")); 2599 for (i = 0; i < bp->b_npages; i++) { 2600 vm_page_t m = bp->b_pages[i]; 2601 vfs_page_set_valid(bp, foff, i, m); 2602 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 2603 } 2604 } 2605} 2606 2607void 2608vfs_bio_clrbuf(struct buf *bp) { 2609 int i, mask = 0; 2610 caddr_t sa, ea; 2611 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) { 2612 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 2613 (bp->b_offset & PAGE_MASK) == 0) { 2614 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 2615 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) && 2616 ((bp->b_pages[0]->valid & mask) != mask)) { 2617 bzero(bp->b_data, bp->b_bufsize); 2618 } 2619 bp->b_pages[0]->valid |= mask; 2620 bp->b_resid = 0; 2621 return; 2622 } 2623 ea = sa = bp->b_data; 2624 for(i=0;i<bp->b_npages;i++,sa=ea) { 2625 int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE; 2626 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 2627 ea = (caddr_t)ulmin((u_long)ea, 2628 (u_long)bp->b_data + bp->b_bufsize); 2629 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 2630 if ((bp->b_pages[i]->valid & mask) == mask) 2631 continue; 2632 if ((bp->b_pages[i]->valid & mask) == 0) { 2633 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 2634 bzero(sa, ea - sa); 2635 } 2636 } else { 2637 for (; sa < ea; sa += DEV_BSIZE, j++) { 2638 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) && 2639 (bp->b_pages[i]->valid & (1<<j)) == 0) 2640 bzero(sa, DEV_BSIZE); 2641 } 2642 } 2643 bp->b_pages[i]->valid |= mask; 2644 vm_page_flag_clear(bp->b_pages[i], PG_ZERO); 2645 } 2646 bp->b_resid = 0; 2647 } else { 2648 clrbuf(bp); 2649 } 2650} 2651 2652/* 2653 * vm_hold_load_pages and vm_hold_unload pages get pages into 2654 * a buffers address space. The pages are anonymous and are 2655 * not associated with a file object. 2656 */ 2657void 2658vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2659{ 2660 vm_offset_t pg; 2661 vm_page_t p; 2662 int index; 2663 2664 to = round_page(to); 2665 from = round_page(from); 2666 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2667 2668 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2669 2670tryagain: 2671 2672 p = vm_page_alloc(kernel_object, 2673 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 2674 VM_ALLOC_NORMAL); 2675 if (!p) { 2676 vm_pageout_deficit += (to - from) >> PAGE_SHIFT; 2677 VM_WAIT; 2678 goto tryagain; 2679 } 2680 vm_page_wire(p); 2681 p->valid = VM_PAGE_BITS_ALL; 2682 vm_page_flag_clear(p, PG_ZERO); 2683 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 2684 bp->b_pages[index] = p; 2685 vm_page_wakeup(p); 2686 } 2687 bp->b_npages = index; 2688} 2689 2690void 2691vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2692{ 2693 vm_offset_t pg; 2694 vm_page_t p; 2695 int index, newnpages; 2696 2697 from = round_page(from); 2698 to = round_page(to); 2699 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2700 2701 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2702 p = bp->b_pages[index]; 2703 if (p && (index < bp->b_npages)) { 2704#if !defined(MAX_PERF) 2705 if (p->busy) { 2706 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 2707 bp->b_blkno, bp->b_lblkno); 2708 } 2709#endif 2710 bp->b_pages[index] = NULL; 2711 pmap_kremove(pg); 2712 vm_page_busy(p); 2713 vm_page_unwire(p, 0); 2714 vm_page_free(p); 2715 } 2716 } 2717 bp->b_npages = newnpages; 2718} 2719 2720 2721#include "opt_ddb.h" 2722#ifdef DDB 2723#include <ddb/ddb.h> 2724 2725DB_SHOW_COMMAND(buffer, db_show_buffer) 2726{ 2727 /* get args */ 2728 struct buf *bp = (struct buf *)addr; 2729 2730 if (!have_addr) { 2731 db_printf("usage: show buffer <addr>\n"); 2732 return; 2733 } 2734 2735 db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc, 2736 (u_int)bp->b_flags, PRINT_BUF_FLAGS); 2737 db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, " 2738 "b_resid = %ld\nb_dev = 0x%x, b_data = %p, " 2739 "b_blkno = %d, b_pblkno = %d\n", 2740 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 2741 bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno); 2742 if (bp->b_npages) { 2743 int i; 2744 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 2745 for (i = 0; i < bp->b_npages; i++) { 2746 vm_page_t m; 2747 m = bp->b_pages[i]; 2748 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 2749 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 2750 if ((i + 1) < bp->b_npages) 2751 db_printf(","); 2752 } 2753 db_printf("\n"); 2754 } 2755} 2756#endif /* DDB */ 2757