vfs_bio.c revision 48333
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.218 1999/06/28 15:32:10 peter Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);
static int flushbufqueues(void);

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
int runningbufspace;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace, hibufspace;
static int needsbuffer;
static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
	&runningbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
	&hibufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
char *buf_wmesg = BUF_WMESG;

extern int vm_swap_size;

#define BUF_MAXUSE		24

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
#define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */

/*
 *	kvaspacewakeup:
 *
 *	Called when kva space is potentially available for recovery or when
 *	kva space is recovered in the buffer_map.  This function wakes up
 *	anyone waiting for buffer_map kva space.  Even though the buffer_map
 *	is larger than maxbufspace, this situation will typically occur
 *	when the buffer_map gets fragmented.
 */

static __inline void
kvaspacewakeup(void)
{
	/*
	 * If someone is waiting for KVA space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
		wakeup(&needsbuffer);
	}
}
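
/*
 * Illustrative sketch (not part of the original code): consumers that run
 * short of a resource set one of the VFS_BIO_NEED_* bits in needsbuffer and
 * sleep on it; the wakeup routines here clear the bit and wake the channel.
 * A waiter roughly looks like the hypothetical fragment below (the wmesg
 * "bufspc" is a placeholder; getnewbuf() uses "newbuf").
 *
 *	needsbuffer |= VFS_BIO_NEED_BUFSPACE;
 *	while (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
 *		if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
 *		    "bufspc", slptimeo))
 *			break;
 *	}
 */
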
/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery or when
 *	buffer space is recovered.  getnewbuf() will block on this flag when
 *	it is unable to free sufficient buffer space.  Buffer space becomes
 *	recoverable when bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{
	++numfreebuffers;
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
}

/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);
	simple_lock_init(&buftimelock);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}

	/*
	 * maxbufspace is currently calculated to support all filesystem
	 * blocks to be 8K.  If you happen to use a 16K filesystem, the size
	 * of the buffer cache is still the same as it would be for 8K
	 * filesystems.  This keeps the size of the buffer cache "in check"
	 * for big block filesystems.
	 *
	 * maxbufspace is calculated as around 50% of the KVA available in
	 * the buffer_map ( DFLTSIZE vs BKVASIZE ), I presume to reduce the
	 * effect of fragmentation.
	 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
		hibufspace = 3 * maxbufspace / 4;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * hibufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	lodirtybuffers = nbuf / 16 + 10;
	hidirtybuffers = nbuf / 8 + 20;
	numdirtybuffers = 0;

/*
 * Try to keep the number of free buffers in the specified range,
 * and give the syncer access to an emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize) {
		vm_map_delete(buffer_map,
		    (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
		);
		bp->b_kvasize = 0;
		kvaspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();
	int old_qindex = bp->b_qindex;

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		if (BUF_REFCNT(bp) == 1)
			TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		else if (BUF_REFCNT(bp) == 0)
			panic("bremfree: not locked");
		else
			/* Temporary panic to verify exclusive locking */
			/* This panic goes away when we allow shared refs */
			panic("bremfree: multiple refs");
		bp->b_qindex = QUEUE_NONE;
		runningbufspace += bp->b_bufsize;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
	 * the buffer was free and we must decrement numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		switch(old_qindex) {
		case QUEUE_EMPTY:
		case QUEUE_LRU:
		case QUEUE_AGE:
			--numfreebuffers;
			break;
		default:
			break;
		}
	}
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (biowait(bp));
	}
	return (0);
}
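
/*
 * Illustrative sketch (not in the original source): a typical synchronous
 * metadata read by a filesystem pairs bread() with brelse()/bqrelse().
 * The vnode "vp", block number "lblk" and size "fs_bsize" below are
 * hypothetical placeholders.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblk, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);	(discard the buffer on failure)
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	bqrelse(bp);		(keep the data cached for reuse)
 */
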
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
 * to initiating I/O.  If B_CACHE is set, the buffer is valid
 * and we do not have to do anything.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct vnode *vp;
	struct mount *mp;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

#if !defined(MAX_PERF)
	if (BUF_REFCNT(bp) == 0)
		panic("bwrite: buffer is not busy???");
#endif
	s = splbio();
	bundirty(bp);

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	bp->b_flags |= B_WRITEINPROG | B_CACHE;

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if ((oldflags & B_ASYNC) == 0)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);
		brelse(bp);
		return (rtval);
	}

	return (0);
}
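
/*
 * Illustrative sketch (not in the original source): callers choose among
 * the write flavors defined in this file depending on how urgently the
 * data must reach stable storage.  "bp" is a hypothetical buffer obtained
 * from getblk() or bread().
 *
 *	bwrite(bp);	synchronous  - wait for completion, get the error
 *	bawrite(bp);	asynchronous - start I/O, released on completion
 *	bdwrite(bp);	delayed      - just mark dirty, written out later
 */
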
/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf * bp)
{
	struct vnode *vp;

#if !defined(MAX_PERF)
	if (BUF_REFCNT(bp) == 0)
		panic("bdwrite: buffer is not busy");
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * XXX The soft dependency code is not prepared to
	 * have I/O done when a bdwrite is requested.  For
	 * now we just let the write be delayed if it is
	 * requested by the soft dependency code.
	 */
	if ((vp = bp->b_vp) &&
	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
	    (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
		return;

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);
}

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear B_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_READ|B_RELBUF);

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		--numdirtybuffers;
	}
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp->b_vp, bp);
}

/*
 *	bowrite:
 *
 *	Ordered write.  Start output on a buffer, and flag it so that the
 *	device will write it in the order it was queued.  The buffer is
 *	released when the output completes.  bwrite() ( or the VOP routine
 *	anyway ) is responsible for handling B_INVAL buffers.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED | B_ASYNC;
	return (VOP_BWRITE(bp->b_vp, bp));
}

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf * bp)
{
	int s;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

#if 0
	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp, NULL);
		return;
	}
#endif

	s = splbio();

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
		/*
		 * Failed write, redirty.  Must clear B_ERROR to prevent
		 * pages from being scrapped.  Note: B_INVAL is ignored
		 * here but will presumably be dealt with later.
		 */
		bp->b_flags &= ~B_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
	    (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */

	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.
	 * B_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI))
	) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * for block sizes that are less than PAGE_SIZE, the b_data
		 * base of the buffer does not represent exactly b_offset and
		 * neither b_offset nor b_size are necessarily page aligned.
		 * Instead, the starting position of b_offset is:
		 *
		 * 	b_data + (b_offset & PAGE_MASK)
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
				    (PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif
	if (BUF_REFCNT(bp) > 1) {
		/* Temporary panic to verify exclusive locking */
		/* This panic goes away when we allow shared refs */
		panic("brelse: multiple refs");
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}

	/* enqueue */

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;
		if (bp->b_kvasize)
			kvaspacewakeup();
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/*
	 * If B_INVAL, clear B_DELWRI.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
		bp->b_flags &= ~B_DELWRI;
		--numdirtybuffers;
	}

	runningbufspace -= bp->b_bufsize;

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free.
	 */

	if (bp->b_bufsize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
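
/*
 * Illustrative sketch (not in the original source): a caller that wants a
 * buffer's contents thrown away, rather than cached, sets B_INVAL (and
 * possibly B_NOCACHE) before releasing it, which steers brelse() into the
 * QUEUE_EMPTY/QUEUE_AGE paths above.  "bp" is a hypothetical locked buffer.
 *
 *	bp->b_flags |= B_INVAL | B_NOCACHE;
 *	brelse(bp);
 *
 * bqrelse(), below, is the cheap path for data we expect to need again.
 */
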
/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		panic("bqrelse: multiple refs");
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	runningbufspace -= bp->b_bufsize;

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
	) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe wakeup
	 */
	if (bp->b_bufsize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i, s;
	vm_page_t m;

	s = splvm();
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	runningbufspace -= bp->b_bufsize;
	splx(s);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	if (bp->b_bufsize)
		bufspacewakeup();
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
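
/*
 * Illustrative sketch (not in the original source): vfs_bio_awrite(), below,
 * only builds a cluster when the neighboring logical blocks are unlocked,
 * cluster-eligible delayed writes of the same block size that are physically
 * contiguous on disk.  Conceptually the check for the i'th neighbor is
 * (the local "ok" is a hypothetical name):
 *
 *	bpa = gbincore(vp, lblkno + i);
 *	ok = bpa != NULL && BUF_REFCNT(bpa) == 0 &&
 *	    (bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
 *	    (B_DELWRI | B_CLUSTEROK) &&
 *	    bpa->b_bufsize == size &&
 *	    bpa->b_blkno == bp->b_blkno + ((i * size) >> DEV_BSHIFT);
 */
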
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files, and
	 * then only if our I/O system is not saturated.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}

	BUF_LOCK(bp, LK_EXCLUSIVE);
	bremfree(bp);
	bp->b_flags |= B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp->b_vp, bp);

	return nwritten;
}

/*
 *	getnewbuf:
 *
 *	Find and initialize a new buffer header, freeing up existing buffers
 *	in the bufqueues as necessary.  The new buffer is returned locked.
 *
 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
 *	buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 *	We block if:
 *		We have insufficient buffer headers
 *		We have insufficient buffer space
 *		buffer_map is too fragmented ( space reservation fails )
 *
 *	We do *not* attempt to flush dirty buffers more than one level deep.
 *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
 *
 *	If P_FLSINPROG is set, we are allowed to dip into our emergency
 *	reserve.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
	int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	struct buf *nbp;
	struct buf *dbp;
	int outofspace;
	int nqindex;
	int defrag = 0;
	static int newbufcnt = 0;
	int lastnewbuf = newbufcnt;

restart:
	/*
	 * Calculate whether we are out of buffer space.  This state is
	 * recalculated on every restart.  If we are out of space, we
	 * have to turn off defragmentation.  The outofspace code will
	 * defragment too, but the looping conditionals will be messed up
	 * if both outofspace and defrag are on.
	 */

	dbp = NULL;
	outofspace = 0;
	if (bufspace >= hibufspace) {
		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
		    bufspace >= maxbufspace
		) {
			outofspace = 1;
			defrag = 0;
		}
	}

	/*
	 * defrag state is semi-persistent.  1 means we are flagged for
	 * defragging.  -1 means we actually defragged something.
	 */
	/* nop */

	/*
	 * Setup for scan.
	 * If we do not have enough free buffers, we setup a degenerate
	 * case that falls through the while.
	 *
	 * If we are in the middle of a flush, we can dip into the
	 * emergency reserve.
	 *
	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
	 * because those buffers are, well, empty.
	 */

	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
	    numfreebuffers < lofreebuffers) {
		nqindex = QUEUE_LRU;
		nbp = NULL;
	} else {
		nqindex = QUEUE_EMPTY;
		if (outofspace ||
		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL) {
			nqindex = QUEUE_AGE;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			if (nbp == NULL) {
				nqindex = QUEUE_LRU;
				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
			}
		}
	}

	/*
	 * Run scan, possibly freeing data and/or kva mappings on the fly
	 * depending.
	 */

	while ((bp = nbp) != NULL) {
		int qindex = nqindex;
		/*
		 * Calculate next bp ( we can only use it if we do not block
		 * or do other fancy things ).
		 */
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
			switch(qindex) {
			case QUEUE_EMPTY:
				nqindex = QUEUE_AGE;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
					break;
				/* fall through */
			case QUEUE_AGE:
				nqindex = QUEUE_LRU;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
					break;
				/* fall through */
			case QUEUE_LRU:
				/*
				 * nbp is NULL.
				 */
				break;
			}
		}

		/*
		 * Sanity Checks
		 */
		KASSERT(BUF_REFCNT(bp) == 0, ("getnewbuf: busy buffer %p on free list", bp));
		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));

		/*
		 * Here we try to move NON VMIO buffers to the end of the
		 * LRU queue in order to make VMIO buffers more readily
		 * freeable.  We also try to move buffers with a positive
		 * usecount to the end.
		 *
		 * Note that by moving the bp to the end, we setup a following
		 * loop.  Since we continue to decrement b_usecount this
		 * is ok and, in fact, desirable.
		 *
		 * If we are at the end of the list, we move ourself to the
		 * same place and need to fixup nbp and nqindex to handle
		 * the following case.
		 */

		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (vmiospace < maxvmiobufspace)
			) {
				--bp->b_usecount;
				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				if (nbp == NULL) {
					nqindex = qindex;
					nbp = bp;
				}
				continue;
			}
		}

		/*
		 * If we come across a delayed write and numdirtybuffers should
		 * be flushed, try to write it out.  Only if P_FLSINPROG is
		 * not set.  We can't afford to recursively stack more than
		 * one deep due to the possibility of having deep VFS call
		 * stacks.
		 *
		 * Limit the number of dirty buffers we are willing to try
		 * to recover since it really isn't our job here.
		 */
		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
			/*
			 * This is rather complex, but necessary.  If we come
			 * across a B_DELWRI buffer we have to flush it in
			 * order to use it.  We only do this if we absolutely
			 * need to.  We must also protect against too much
			 * recursion which might run us out of stack due to
			 * deep VFS call stacks.
			 *
			 * In heavy-writing situations, QUEUE_LRU can contain
			 * a large number of DELWRI buffers at its head.  These
			 * buffers must be moved to the tail if they cannot be
			 * written async in order to reduce the scanning time
			 * required to skip past these buffers in later
			 * getnewbuf() calls.
			 */
			if ((curproc->p_flag & P_FLSINPROG) ||
			    numdirtybuffers < hidirtybuffers) {
				if (qindex == QUEUE_LRU) {
					/*
					 * dbp prevents us from looping forever
					 * if all bps in QUEUE_LRU are dirty.
					 */
					if (bp == dbp) {
						bp = NULL;
						break;
					}
					if (dbp == NULL)
						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
				continue;
			}
			curproc->p_flag |= P_FLSINPROG;
			vfs_bio_awrite(bp);
			curproc->p_flag &= ~P_FLSINPROG;
			goto restart;
		}

		if (defrag > 0 && bp->b_kvasize == 0)
			continue;
		if (outofspace > 0 && bp->b_bufsize == 0)
			continue;

		/*
		 * Start freeing the bp.  This is somewhat involved.  nbp
		 * remains valid only for QUEUE_EMPTY bp's.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			panic("getnewbuf: locked buf");
		bremfree(bp);

		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);
			}
			if (bp->b_vp)
				brelvp(bp);
		}

		/*
		 * NOTE:  nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */

		if (bp->b_rcred != NOCRED) {
			crfree(bp->b_rcred);
			bp->b_rcred = NOCRED;
		}
		if (bp->b_wcred != NOCRED) {
			crfree(bp->b_wcred);
			bp->b_wcred = NOCRED;
		}
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);

		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);

		if (bp->b_bufsize)
			allocbuf(bp, 0);

		bp->b_flags = 0;
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_offset = NOOFFSET;
		bp->b_iodone = 0;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		bp->b_npages = 0;
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_usecount = 5;

		LIST_INIT(&bp->b_dep);

		/*
		 * Ok, now that we have a free buffer, if we are defragging
		 * we have to recover the kvaspace.
		 */

		if (defrag > 0) {
			defrag = -1;
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			goto restart;
		}

		if (outofspace > 0) {
			outofspace = -1;
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			goto restart;
		}

		/*
		 * We are done
		 */
		break;
	}

	/*
	 * If we exhausted our list, sleep as appropriate.
	 */

	if (bp == NULL) {
		int flags;

dosleep:
		if (defrag > 0)
			flags = VFS_BIO_NEED_KVASPACE;
		else if (outofspace > 0)
			flags = VFS_BIO_NEED_BUFSPACE;
		else
			flags = VFS_BIO_NEED_ANY;

		(void) speedup_syncer();
		needsbuffer |= flags;
		while (needsbuffer & flags) {
			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
			    "newbuf", slptimeo))
				return (NULL);
		}
	} else {
		/*
		 * We finally have a valid bp.
		 * We aren't quite out of the woods, we still have to
		 * reserve kva space.
		 */
		vm_offset_t addr = 0;

		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

		if (maxsize != bp->b_kvasize) {
			bfreekva(bp);

			if (vm_map_findspace(buffer_map,
				vm_map_min(buffer_map), maxsize, &addr)
			) {
				/*
				 * Uh oh.  Buffer map is too fragmented.  Try
				 * to defragment.
				 */
				if (defrag <= 0) {
					defrag = 1;
					bp->b_flags |= B_INVAL;
					brelse(bp);
					goto restart;
				}
				/*
				 * Uh oh.  We couldn't seem to defragment
				 */
				bp = NULL;
				goto dosleep;
			}
		}
		if (addr) {
			vm_map_insert(buffer_map, NULL, 0,
				addr, addr + maxsize,
				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

			bp->b_kvabase = (caddr_t) addr;
			bp->b_kvasize = maxsize;
		}
		bp->b_data = bp->b_kvabase;
	}

	/*
	 * If we have slept at some point in this process and another
	 * process has managed to allocate a new buffer while we slept,
	 * we have to return NULL so that our caller can recheck to
	 * ensure that the other process did not create an identically
	 * identified buffer to the one we were requesting.  We make this
	 * check by incrementing the static int newbufcnt each time we
	 * successfully allocate a new buffer.  By saving the value of
	 * newbufcnt in our local lastnewbuf, we can compare newbufcnt
	 * with lastnewbuf to see if any other process managed to
	 * allocate a buffer while we were doing so ourselves.
	 *
	 * Note that bp, if valid, is locked.
	 */
	if (lastnewbuf == newbufcnt) {
		/*
		 * No buffers allocated, so we can return one if we were
		 * successful, or continue trying if we were not successful.
		 */
		if (bp != NULL) {
			newbufcnt += 1;
			return (bp);
		}
		goto restart;
	}
	/*
	 * Another process allocated a buffer since we were called, so
	 * we have to free the one we allocated and return NULL to let
	 * our caller recheck to see if a new buffer is still needed.
	 */
	if (bp != NULL) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
	}
	return (NULL);
}

/*
 *	waitfreebuffers:
 *
 *	Wait for sufficient free buffers.  This routine is not called if
 *	curproc is the update process so we do not have to do anything
 *	fancy.
 */

static void
waitfreebuffers(int slpflag, int slptimeo)
{
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers >= hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
			break;
	}
}
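
/*
 * Illustrative note (not in the original source): the dirty-buffer
 * hysteresis set up in bufinit() scales with nbuf.  For a hypothetical
 * kernel with nbuf = 1024, lodirtybuffers = 1024/16 + 10 = 74 and
 * hidirtybuffers = 1024/8 + 20 = 148, so flushdirtybuffers() below starts
 * flushing once 148 delayed writes have accumulated and stops again
 * near 74.
 */
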
/*
 *	flushdirtybuffers:
 *
 *	This routine is called when we get too many dirty buffers.
 *
 *	We have to protect ourselves from recursion, but we also do not want
 *	other process's flushdirtybuffers() to interfere with the syncer if
 *	it decides to flushdirtybuffers().
 *
 *	In order to maximize operations, we allow any process to flush
 *	dirty buffers and use P_FLSINPROG to prevent recursion.
 */

static void
flushdirtybuffers(int slpflag, int slptimeo)
{
	int s;

	s = splbio();

	if (curproc->p_flag & P_FLSINPROG) {
		splx(s);
		return;
	}
	curproc->p_flag |= P_FLSINPROG;

	while (numdirtybuffers > lodirtybuffers) {
		if (flushbufqueues() == 0)
			break;
	}

	curproc->p_flag &= ~P_FLSINPROG;

	splx(s);
}

static int
flushbufqueues(void)
{
	struct buf *bp;
	int qindex;
	int r = 0;

	qindex = QUEUE_AGE;
	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);

	for (;;) {
		if (bp == NULL) {
			if (qindex == QUEUE_LRU)
				break;
			qindex = QUEUE_LRU;
			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
				break;
		}

		/*
		 * Try to free up B_INVAL delayed-write buffers rather than
		 * writing them out.  Note also that NFS is somewhat sensitive
		 * to B_INVAL buffers so it is doubly important that we do
		 * this.
		 */
		if ((bp->b_flags & B_DELWRI) != 0) {
			if (bp->b_flags & B_INVAL) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
					panic("flushbufqueues: locked buf");
				bremfree(bp);
				brelse(bp);
			} else {
				vfs_bio_awrite(bp);
			}
			++r;
			break;
		}
		bp = TAILQ_NEXT(bp, b_freelist);
	}
	return(r);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc, size;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	size = PAGE_SIZE;
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		tinc = size;
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 *	vfs_setdirty:
 *
 *	Sets the dirty range for a buffer based on the status of the dirty
 *	bits in the pages comprising the buffer.
 *
 *	The range is limited to the size of the buffer.
 *
 *	This routine is primarily used by NFS, but is generalized for the
 *	B_VMIO case.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;

	/*
	 * Degenerate case - empty buffer
	 */

	if (bp->b_bufsize == 0)
		return;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	object = bp->b_pages[0]->object;

	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p writeable but not mightbedirty\n", object);
	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p mightbedirty but not writeable\n", object);

	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
		vm_offset_t boffset;
		vm_offset_t eoffset;

		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_pages[i]);
		}

		/*
		 * Calculate the encompassing dirty range, boffset and eoffset,
		 * (eoffset - boffset) bytes.
		 */

		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty)
				break;
		}
		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		/*
		 * Fit it to the buffer.
		 */

		if (eoffset > bp->b_bcount)
			eoffset = bp->b_bcount;

		/*
		 * If we have a good dirty range, merge with the existing
		 * dirty range.
		 */

		if (boffset < eoffset) {
			if (bp->b_dirtyoff > boffset)
				bp->b_dirtyoff = boffset;
			if (bp->b_dirtyend < eoffset)
				bp->b_dirtyend = eoffset;
		}
	}
}

/*
 *	getblk:
 *
 *	Get a block given a specified block and offset into a file/device.
 *	The buffer's B_DONE bit will be cleared on return, making it almost
 *	ready for an I/O initiation.  B_INVAL may or may not be set on
 *	return.  The caller should clear B_INVAL prior to initiating a
 *	READ.
 *
 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 *	an existing buffer.
 *
 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 *	and then cleared based on the backing VM.  If the previous buffer is
 *	non-0-sized but invalid, B_CACHE will be cleared.
 *
 *	If getblk() must create a new buffer, the new buffer is returned with
 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 *	case it is returned with B_INVAL clear and B_CACHE set based on the
 *	backing VM.
 *
 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
 *	B_CACHE bit is clear.
 *
 *	What this means, basically, is that the caller should use B_CACHE to
 *	determine whether the buffer is fully valid or not and should clear
 *	B_INVAL prior to issuing a read.  If the caller intends to validate
 *	the buffer by loading its data area with something, the caller needs
 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
 *	the caller should set B_CACHE ( as an optimization ), else the caller
 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
 *	a write attempt or if it was a successful read.  If the caller
 *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	/*
	 * Block if we are low on buffers.  The syncer is allowed more
	 * buffers in order to avoid a deadlock.
	 */
	if (curproc == updateproc && numfreebuffers == 0) {
		needsbuffer |= VFS_BIO_NEED_ANY;
		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
		    slptimeo);
	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		/*
		 * Buffer is in-core
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "getblk", slpflag, slptimeo) == ENOLCK)
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}

		/*
		 * The buffer is locked.  B_CACHE is cleared if the buffer is
		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
		 * and for a VMIO buffer B_CACHE is adjusted according to the
		 * backing VM cache.
		 */
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
			bp->b_flags |= B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies for non-VMIO case.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)
			) {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					VOP_BWRITE(bp->b_vp, bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						VOP_BWRITE(bp->b_vp, bp);
					}
				}
				goto loop;
			}
		}

		/*
		 * If the size is inconsistent in the VMIO case, we can resize
		 * the buffer.  This might lead to B_CACHE getting set or
		 * cleared.  If the size has not changed, B_CACHE remains
		 * unchanged from its previous state.
		 */

		if (bp->b_bcount != size)
			allocbuf(bp, size);

		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));

		/*
		 * A buffer with B_DELWRI set and B_CACHE clear must
		 * be committed before we can return the buffer in
		 * order to prevent the caller from issuing a read
		 * ( due to B_CACHE not being set ) and overwriting
		 * it.
		 *
		 * Most callers, including NFS and FFS, need this to
		 * operate properly either because they assume they
		 * can issue a read if B_CACHE is not set, or because
		 * ( for example ) an uncached B_DELWRI might loop due
		 * to softupdates re-dirtying the buffer.  In the latter
		 * case, B_CACHE is set after the first write completes,
		 * preventing further loops.
		 */

		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			VOP_BWRITE(bp->b_vp, bp);
			goto loop;
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		bp->b_flags &= ~B_DONE;
	} else {
		/*
		 * Buffer is not in-core, create new buffer.  The buffer
		 * returned by getnewbuf() is locked.
		 * Note that the returned buffer is also considered valid
		 * (not marked B_INVAL).
		 */
		int bsize, maxsize, vmio;
		off_t offset;

		if (vp->v_type == VBLK)
			bsize = DEV_BSIZE;
		else if (vp->v_mountedhere)
			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
		else if (vp->v_mount)
			bsize = vp->v_mount->mnt_stat.f_iosize;
		else
			bsize = size;

		offset = (off_t)blkno * bsize;
		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
		maxsize = imax(maxsize, bsize);

		if ((bp = getnewbuf(vp, blkno,
			slpflag, slptimeo, size, maxsize)) == NULL) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * This can be a problem whether the vnode is locked or not.
		 * If the buffer is created out from under us, we have to
		 * throw away the one we just created.  There is no window
		 * race because we are safely running at splbio() from the
		 * point of the duplicate buffer creation through to here.
		 */
		if (gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		/*
		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
		 * buffer size starts out as 0, B_CACHE will be set by
		 * allocbuf() for the VMIO case prior to it testing the
		 * backing store for validity.
		 */

		if (vmio) {
			bp->b_flags |= B_VMIO;
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		splx(s);
		bp->b_flags &= ~B_DONE;
	}
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.  The buffer is initially
 * set to B_INVAL.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}
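
/*
 * Illustrative sketch (not in the original source): the getblk()/B_CACHE
 * contract described above boils down to the following caller-side pattern,
 * which is essentially what bread() does.  "vp", "lblk" and "bsize" are
 * hypothetical placeholders.
 *
 *	bp = getblk(vp, lblk, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags |= B_READ;
 *		bp->b_flags &= ~(B_ERROR | B_INVAL);
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(vp, bp);
 *		error = biowait(bp);
 *	}
 */
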
2076 */ 2077 2078int 2079allocbuf(struct buf *bp, int size) 2080{ 2081 int newbsize, mbsize; 2082 int i; 2083 2084#if !defined(MAX_PERF) 2085 if (BUF_REFCNT(bp) == 0) 2086 panic("allocbuf: buffer not busy"); 2087 2088 if (bp->b_kvasize < size) 2089 panic("allocbuf: buffer too small"); 2090#endif 2091 2092 if ((bp->b_flags & B_VMIO) == 0) { 2093 caddr_t origbuf; 2094 int origbufsize; 2095 /* 2096 * Just get anonymous memory from the kernel. Don't 2097 * mess with B_CACHE. 2098 */ 2099 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2100#if !defined(NO_B_MALLOC) 2101 if (bp->b_flags & B_MALLOC) 2102 newbsize = mbsize; 2103 else 2104#endif 2105 newbsize = round_page(size); 2106 2107 if (newbsize < bp->b_bufsize) { 2108#if !defined(NO_B_MALLOC) 2109 /* 2110 * malloced buffers are not shrunk 2111 */ 2112 if (bp->b_flags & B_MALLOC) { 2113 if (newbsize) { 2114 bp->b_bcount = size; 2115 } else { 2116 free(bp->b_data, M_BIOBUF); 2117 bufspace -= bp->b_bufsize; 2118 bufmallocspace -= bp->b_bufsize; 2119 runningbufspace -= bp->b_bufsize; 2120 if (bp->b_bufsize) 2121 bufspacewakeup(); 2122 bp->b_data = bp->b_kvabase; 2123 bp->b_bufsize = 0; 2124 bp->b_bcount = 0; 2125 bp->b_flags &= ~B_MALLOC; 2126 } 2127 return 1; 2128 } 2129#endif 2130 vm_hold_free_pages( 2131 bp, 2132 (vm_offset_t) bp->b_data + newbsize, 2133 (vm_offset_t) bp->b_data + bp->b_bufsize); 2134 } else if (newbsize > bp->b_bufsize) { 2135#if !defined(NO_B_MALLOC) 2136 /* 2137 * We only use malloced memory on the first allocation. 2138 * and revert to page-allocated memory when the buffer grows. 2139 */ 2140 if ( (bufmallocspace < maxbufmallocspace) && 2141 (bp->b_bufsize == 0) && 2142 (mbsize <= PAGE_SIZE/2)) { 2143 2144 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 2145 bp->b_bufsize = mbsize; 2146 bp->b_bcount = size; 2147 bp->b_flags |= B_MALLOC; 2148 bufspace += mbsize; 2149 bufmallocspace += mbsize; 2150 runningbufspace += bp->b_bufsize; 2151 return 1; 2152 } 2153#endif 2154 origbuf = NULL; 2155 origbufsize = 0; 2156#if !defined(NO_B_MALLOC) 2157 /* 2158 * If the buffer is growing on its other-than-first allocation, 2159 * then we revert to the page-allocation scheme. 2160 */ 2161 if (bp->b_flags & B_MALLOC) { 2162 origbuf = bp->b_data; 2163 origbufsize = bp->b_bufsize; 2164 bp->b_data = bp->b_kvabase; 2165 bufspace -= bp->b_bufsize; 2166 bufmallocspace -= bp->b_bufsize; 2167 runningbufspace -= bp->b_bufsize; 2168 if (bp->b_bufsize) 2169 bufspacewakeup(); 2170 bp->b_bufsize = 0; 2171 bp->b_flags &= ~B_MALLOC; 2172 newbsize = round_page(newbsize); 2173 } 2174#endif 2175 vm_hold_load_pages( 2176 bp, 2177 (vm_offset_t) bp->b_data + bp->b_bufsize, 2178 (vm_offset_t) bp->b_data + newbsize); 2179#if !defined(NO_B_MALLOC) 2180 if (origbuf) { 2181 bcopy(origbuf, bp->b_data, origbufsize); 2182 free(origbuf, M_BIOBUF); 2183 } 2184#endif 2185 } 2186 } else { 2187 vm_page_t m; 2188 int desiredpages; 2189 2190 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2191 desiredpages = (size == 0) ? 0 : 2192 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 2193 2194#if !defined(NO_B_MALLOC) 2195 if (bp->b_flags & B_MALLOC) 2196 panic("allocbuf: VMIO buffer can't be malloced"); 2197#endif 2198 /* 2199 * Set B_CACHE initially if buffer is 0 length or will become 2200 * 0-length. 2201 */ 2202 if (size == 0 || bp->b_bufsize == 0) 2203 bp->b_flags |= B_CACHE; 2204 2205 if (newbsize < bp->b_bufsize) { 2206 /* 2207 * DEV_BSIZE aligned new buffer size is less then the 2208 * DEV_BSIZE aligned existing buffer size. 
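/*
 * Illustrative sketch of the non-VMIO sizing policy above (the helper
 * is hypothetical; it ignores the global bufmallocspace limit and
 * assumes DEV_BSIZE 512 and PAGE_SIZE 4096): a small first allocation
 * is carried in malloc'ed memory rounded up to DEV_BSIZE, while
 * anything larger, or anything that has to grow later, is switched to
 * page-backed kva rounded up to a page.
 */
static int
example_nonvmio_allocation_size(int size, int first_allocation)
{
	int mbsize;

	mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	if (first_allocation && mbsize <= PAGE_SIZE / 2)
		return (mbsize);	/* B_MALLOC path: 100 -> 512 */
	return (round_page(size));	/* page path:     100 -> 4096 */
}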
Figure out 2209 * if we have to remove any pages. 2210 */ 2211 if (desiredpages < bp->b_npages) { 2212 for (i = desiredpages; i < bp->b_npages; i++) { 2213 /* 2214 * the page is not freed here -- it 2215 * is the responsibility of 2216 * vnode_pager_setsize 2217 */ 2218 m = bp->b_pages[i]; 2219 KASSERT(m != bogus_page, 2220 ("allocbuf: bogus page found")); 2221 while (vm_page_sleep_busy(m, TRUE, "biodep")) 2222 ; 2223 2224 bp->b_pages[i] = NULL; 2225 vm_page_unwire(m, 0); 2226 } 2227 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 2228 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 2229 bp->b_npages = desiredpages; 2230 } 2231 } else if (size > bp->b_bcount) { 2232 /* 2233 * We are growing the buffer, possibly in a 2234 * byte-granular fashion. 2235 */ 2236 struct vnode *vp; 2237 vm_object_t obj; 2238 vm_offset_t toff; 2239 vm_offset_t tinc; 2240 2241 /* 2242 * Step 1, bring in the VM pages from the object, 2243 * allocating them if necessary. We must clear 2244 * B_CACHE if these pages are not valid for the 2245 * range covered by the buffer. 2246 */ 2247 2248 vp = bp->b_vp; 2249 obj = vp->v_object; 2250 2251 while (bp->b_npages < desiredpages) { 2252 vm_page_t m; 2253 vm_pindex_t pi; 2254 2255 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages; 2256 if ((m = vm_page_lookup(obj, pi)) == NULL) { 2257 m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL); 2258 if (m == NULL) { 2259 VM_WAIT; 2260 vm_pageout_deficit += desiredpages - bp->b_npages; 2261 } else { 2262 vm_page_wire(m); 2263 vm_page_wakeup(m); 2264 bp->b_flags &= ~B_CACHE; 2265 bp->b_pages[bp->b_npages] = m; 2266 ++bp->b_npages; 2267 } 2268 continue; 2269 } 2270 2271 /* 2272 * We found a page. If we have to sleep on it, 2273 * retry because it might have gotten freed out 2274 * from under us. 2275 * 2276 * We can only test PG_BUSY here. Blocking on 2277 * m->busy might lead to a deadlock: 2278 * 2279 * vm_fault->getpages->cluster_read->allocbuf 2280 * 2281 */ 2282 2283 if (vm_page_sleep_busy(m, FALSE, "pgtblk")) 2284 continue; 2285 2286 /* 2287 * We have a good page. Should we wakeup the 2288 * page daemon? 2289 */ 2290 if ((curproc != pageproc) && 2291 ((m->queue - m->pc) == PQ_CACHE) && 2292 ((cnt.v_free_count + cnt.v_cache_count) < 2293 (cnt.v_free_min + cnt.v_cache_min)) 2294 ) { 2295 pagedaemon_wakeup(); 2296 } 2297 vm_page_flag_clear(m, PG_ZERO); 2298 vm_page_wire(m); 2299 bp->b_pages[bp->b_npages] = m; 2300 ++bp->b_npages; 2301 } 2302 2303 /* 2304 * Step 2. We've loaded the pages into the buffer, 2305 * we have to figure out if we can still have B_CACHE 2306 * set. Note that B_CACHE is set according to the 2307 * byte-granular range ( bcount and size ), new the 2308 * aligned range ( newbsize ). 2309 * 2310 * The VM test is against m->valid, which is DEV_BSIZE 2311 * aligned. Needless to say, the validity of the data 2312 * needs to also be DEV_BSIZE aligned. Note that this 2313 * fails with NFS if the server or some other client 2314 * extends the file's EOF. If our buffer is resized, 2315 * B_CACHE may remain set! 
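/*
 * Illustrative arithmetic for the page loop above (hypothetical
 * numbers, 4K pages): a VMIO buffer with b_offset 0x6200 growing to
 * 8192 bytes straddles three pages of the backing object, so
 * desiredpages = num_pages(0x200 + 8192) = 3 and the pages brought in
 * sit at object indices OFF_TO_IDX(0x6200) = 6 through 8.  B_CACHE
 * then survives only if every DEV_BSIZE chunk of that byte range is
 * marked valid in the corresponding m->valid bitmaps.
 */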
XXX 2316 */ 2317 2318 toff = bp->b_bcount; 2319 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 2320 2321 while ((bp->b_flags & B_CACHE) && toff < size) { 2322 vm_pindex_t pi; 2323 2324 if (tinc > (size - toff)) 2325 tinc = size - toff; 2326 2327 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 2328 PAGE_SHIFT; 2329 2330 vfs_buf_test_cache( 2331 bp, 2332 bp->b_offset, 2333 toff, 2334 tinc, 2335 bp->b_pages[pi] 2336 ); 2337 toff += tinc; 2338 tinc = PAGE_SIZE; 2339 } 2340 2341 /* 2342 * Step 3, fixup the KVM pmap. Remember that 2343 * bp->b_data is relative to bp->b_offset, but 2344 * bp->b_offset may be offset into the first page. 2345 */ 2346 2347 bp->b_data = (caddr_t) 2348 trunc_page((vm_offset_t)bp->b_data); 2349 pmap_qenter( 2350 (vm_offset_t)bp->b_data, 2351 bp->b_pages, 2352 bp->b_npages 2353 ); 2354 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 2355 (vm_offset_t)(bp->b_offset & PAGE_MASK)); 2356 } 2357 } 2358 if (bp->b_flags & B_VMIO) 2359 vmiospace += (newbsize - bp->b_bufsize); 2360 bufspace += (newbsize - bp->b_bufsize); 2361 runningbufspace += (newbsize - bp->b_bufsize); 2362 if (newbsize < bp->b_bufsize) 2363 bufspacewakeup(); 2364 bp->b_bufsize = newbsize; /* actual buffer allocation */ 2365 bp->b_bcount = size; /* requested buffer size */ 2366 return 1; 2367} 2368 2369/* 2370 * biowait: 2371 * 2372 * Wait for buffer I/O completion, returning error status. The buffer 2373 * is left locked and B_DONE on return. B_EINTR is converted into a EINTR 2374 * error and cleared. 2375 */ 2376int 2377biowait(register struct buf * bp) 2378{ 2379 int s; 2380 2381 s = splbio(); 2382 while ((bp->b_flags & B_DONE) == 0) 2383#if defined(NO_SCHEDULE_MODS) 2384 tsleep(bp, PRIBIO, "biowait", 0); 2385#else 2386 if (bp->b_flags & B_READ) 2387 tsleep(bp, PRIBIO, "biord", 0); 2388 else 2389 tsleep(bp, PRIBIO, "biowr", 0); 2390#endif 2391 splx(s); 2392 if (bp->b_flags & B_EINTR) { 2393 bp->b_flags &= ~B_EINTR; 2394 return (EINTR); 2395 } 2396 if (bp->b_flags & B_ERROR) { 2397 return (bp->b_error ? bp->b_error : EIO); 2398 } else { 2399 return (0); 2400 } 2401} 2402 2403/* 2404 * biodone: 2405 * 2406 * Finish I/O on a buffer, optionally calling a completion function. 2407 * This is usually called from an interrupt so process blocking is 2408 * not allowed. 2409 * 2410 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 2411 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 2412 * assuming B_INVAL is clear. 2413 * 2414 * For the VMIO case, we set B_CACHE if the op was a read and no 2415 * read error occured, or if the op was a write. B_CACHE is never 2416 * set if the buffer is invalid or otherwise uncacheable. 2417 * 2418 * biodone does not mess with B_INVAL, allowing the I/O routine or the 2419 * initiator to leave B_INVAL set to brelse the buffer out of existance 2420 * in the biodone routine. 
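/*
 * Illustrative sketch (hypothetical helper, modeled on the bwrite()/
 * bawrite() pattern): an asynchronous writer does not call biowait().
 * It marks the buffer B_ASYNC before handing it to the driver, and
 * biodone() releases the buffer itself once the transfer completes.
 * Dirty-list and accounting maintenance done by the real bwrite() is
 * omitted here.
 */
static void
example_async_write(struct buf *bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	bp->b_flags |= B_ASYNC;
	vfs_busy_pages(bp, 1);		/* pages become valid and clean */
	VOP_STRATEGY(bp->b_vp, bp);
	/* no biowait(): completion and release happen in biodone() */
}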
2421 */ 2422void 2423biodone(register struct buf * bp) 2424{ 2425 int s; 2426 2427 s = splbio(); 2428 2429 KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy", bp)); 2430 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 2431 2432 bp->b_flags |= B_DONE; 2433 2434 if (bp->b_flags & B_FREEBUF) { 2435 brelse(bp); 2436 splx(s); 2437 return; 2438 } 2439 2440 if ((bp->b_flags & B_READ) == 0) { 2441 vwakeup(bp); 2442 } 2443 2444 /* call optional completion function if requested */ 2445 if (bp->b_flags & B_CALL) { 2446 bp->b_flags &= ~B_CALL; 2447 (*bp->b_iodone) (bp); 2448 splx(s); 2449 return; 2450 } 2451 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 2452 (*bioops.io_complete)(bp); 2453 2454 if (bp->b_flags & B_VMIO) { 2455 int i, resid; 2456 vm_ooffset_t foff; 2457 vm_page_t m; 2458 vm_object_t obj; 2459 int iosize; 2460 struct vnode *vp = bp->b_vp; 2461 2462 obj = vp->v_object; 2463 2464#if defined(VFS_BIO_DEBUG) 2465 if (vp->v_usecount == 0) { 2466 panic("biodone: zero vnode ref count"); 2467 } 2468 2469 if (vp->v_object == NULL) { 2470 panic("biodone: missing VM object"); 2471 } 2472 2473 if ((vp->v_flag & VOBJBUF) == 0) { 2474 panic("biodone: vnode is not setup for merged cache"); 2475 } 2476#endif 2477 2478 foff = bp->b_offset; 2479 KASSERT(bp->b_offset != NOOFFSET, 2480 ("biodone: no buffer offset")); 2481 2482#if !defined(MAX_PERF) 2483 if (!obj) { 2484 panic("biodone: no object"); 2485 } 2486#endif 2487#if defined(VFS_BIO_DEBUG) 2488 if (obj->paging_in_progress < bp->b_npages) { 2489 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 2490 obj->paging_in_progress, bp->b_npages); 2491 } 2492#endif 2493 2494 /* 2495 * Set B_CACHE if the op was a normal read and no error 2496 * occured. B_CACHE is set for writes in the b*write() 2497 * routines. 2498 */ 2499 iosize = bp->b_bcount; 2500 if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) { 2501 bp->b_flags |= B_CACHE; 2502 } 2503 2504 for (i = 0; i < bp->b_npages; i++) { 2505 int bogusflag = 0; 2506 m = bp->b_pages[i]; 2507 if (m == bogus_page) { 2508 bogusflag = 1; 2509 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 2510 if (!m) { 2511#if defined(VFS_BIO_DEBUG) 2512 printf("biodone: page disappeared\n"); 2513#endif 2514 vm_object_pip_subtract(obj, 1); 2515 bp->b_flags &= ~B_CACHE; 2516 continue; 2517 } 2518 bp->b_pages[i] = m; 2519 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2520 } 2521#if defined(VFS_BIO_DEBUG) 2522 if (OFF_TO_IDX(foff) != m->pindex) { 2523 printf( 2524"biodone: foff(%lu)/m->pindex(%d) mismatch\n", 2525 (unsigned long)foff, m->pindex); 2526 } 2527#endif 2528 resid = IDX_TO_OFF(m->pindex + 1) - foff; 2529 if (resid > iosize) 2530 resid = iosize; 2531 2532 /* 2533 * In the write case, the valid and clean bits are 2534 * already changed correctly ( see bdwrite() ), so we 2535 * only need to do this here in the read case. 2536 */ 2537 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 2538 vfs_page_set_valid(bp, foff, i, m); 2539 } 2540 vm_page_flag_clear(m, PG_ZERO); 2541 2542 /* 2543 * when debugging new filesystems or buffer I/O methods, this 2544 * is the most common error that pops up. if you see this, you 2545 * have not set the page busy flag correctly!!! 
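/*
 * Illustrative arithmetic for the completion loop above (hypothetical
 * numbers, 4K pages): for a buffer with b_offset 0x6200, the first
 * iteration only covers the tail of object page 6:
 *
 *	foff  = 0x6200
 *	resid = IDX_TO_OFF(6 + 1) - foff = 0x7000 - 0x6200 = 0xe00
 *
 * so 3584 bytes of page 6 are finished, foff advances to the page
 * boundary at 0x7000, and subsequent iterations cover whole pages,
 * capped by the remaining iosize on the last one.
 */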
2546 */ 2547 if (m->busy == 0) { 2548#if !defined(MAX_PERF) 2549 printf("biodone: page busy < 0, " 2550 "pindex: %d, foff: 0x(%x,%x), " 2551 "resid: %d, index: %d\n", 2552 (int) m->pindex, (int)(foff >> 32), 2553 (int) foff & 0xffffffff, resid, i); 2554#endif 2555 if (vp->v_type != VBLK) 2556#if !defined(MAX_PERF) 2557 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 2558 bp->b_vp->v_mount->mnt_stat.f_iosize, 2559 (int) bp->b_lblkno, 2560 bp->b_flags, bp->b_npages); 2561 else 2562 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 2563 (int) bp->b_lblkno, 2564 bp->b_flags, bp->b_npages); 2565 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 2566 m->valid, m->dirty, m->wire_count); 2567#endif 2568 panic("biodone: page busy < 0\n"); 2569 } 2570 vm_page_io_finish(m); 2571 vm_object_pip_subtract(obj, 1); 2572 foff += resid; 2573 iosize -= resid; 2574 } 2575 if (obj) 2576 vm_object_pip_wakeupn(obj, 0); 2577 } 2578 /* 2579 * For asynchronous completions, release the buffer now. The brelse 2580 * will do a wakeup there if necessary - so no need to do a wakeup 2581 * here in the async case. The sync case always needs to do a wakeup. 2582 */ 2583 2584 if (bp->b_flags & B_ASYNC) { 2585 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 2586 brelse(bp); 2587 else 2588 bqrelse(bp); 2589 } else { 2590 wakeup(bp); 2591 } 2592 splx(s); 2593} 2594 2595/* 2596 * This routine is called in lieu of iodone in the case of 2597 * incomplete I/O. This keeps the busy status for pages 2598 * consistant. 2599 */ 2600void 2601vfs_unbusy_pages(struct buf * bp) 2602{ 2603 int i; 2604 2605 if (bp->b_flags & B_VMIO) { 2606 struct vnode *vp = bp->b_vp; 2607 vm_object_t obj = vp->v_object; 2608 2609 for (i = 0; i < bp->b_npages; i++) { 2610 vm_page_t m = bp->b_pages[i]; 2611 2612 if (m == bogus_page) { 2613 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 2614#if !defined(MAX_PERF) 2615 if (!m) { 2616 panic("vfs_unbusy_pages: page missing\n"); 2617 } 2618#endif 2619 bp->b_pages[i] = m; 2620 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2621 } 2622 vm_object_pip_subtract(obj, 1); 2623 vm_page_flag_clear(m, PG_ZERO); 2624 vm_page_io_finish(m); 2625 } 2626 vm_object_pip_wakeupn(obj, 0); 2627 } 2628} 2629 2630/* 2631 * vfs_page_set_valid: 2632 * 2633 * Set the valid bits in a page based on the supplied offset. The 2634 * range is restricted to the buffer's size. 2635 * 2636 * This routine is typically called after a read completes. 2637 */ 2638static void 2639vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2640{ 2641 vm_ooffset_t soff, eoff; 2642 2643 /* 2644 * Start and end offsets in buffer. eoff - soff may not cross a 2645 * page boundry or cross the end of the buffer. The end of the 2646 * buffer, in this case, is our file EOF, not the allocation size 2647 * of the buffer. 2648 */ 2649 soff = off; 2650 eoff = (off + PAGE_SIZE) & ~PAGE_MASK; 2651 if (eoff > bp->b_offset + bp->b_bcount) 2652 eoff = bp->b_offset + bp->b_bcount; 2653 2654 /* 2655 * Set valid range. This is typically the entire buffer and thus the 2656 * entire page. 2657 */ 2658 if (eoff > soff) { 2659 vm_page_set_validclean( 2660 m, 2661 (vm_offset_t) (soff & PAGE_MASK), 2662 (vm_offset_t) (eoff - soff) 2663 ); 2664 } 2665} 2666 2667/* 2668 * This routine is called before a device strategy routine. 2669 * It is used to tell the VM system that paging I/O is in 2670 * progress, and treat the pages associated with the buffer 2671 * almost as being PG_BUSY. 
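/*
 * Illustrative arithmetic for vfs_page_set_valid() above (hypothetical
 * numbers, 4K pages): for a buffer with b_offset 0x6000 and b_bcount
 * 0x1800, the call for the second page (off = 0x7000) clips eoff from
 * the page boundary 0x8000 back to 0x6000 + 0x1800 = 0x7800, so only
 * bytes 0..0x7ff of that page are marked valid and clean; the rest of
 * the page, which lies beyond the buffer, is left untouched.
 */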
Also the object paging_in_progress 2672 * flag is handled to make sure that the object doesn't become 2673 * inconsistant. 2674 * 2675 * Since I/O has not been initiated yet, certain buffer flags 2676 * such as B_ERROR or B_INVAL may be in an inconsistant state 2677 * and should be ignored. 2678 */ 2679void 2680vfs_busy_pages(struct buf * bp, int clear_modify) 2681{ 2682 int i, bogus; 2683 2684 if (bp->b_flags & B_VMIO) { 2685 struct vnode *vp = bp->b_vp; 2686 vm_object_t obj = vp->v_object; 2687 vm_ooffset_t foff; 2688 2689 foff = bp->b_offset; 2690 KASSERT(bp->b_offset != NOOFFSET, 2691 ("vfs_busy_pages: no buffer offset")); 2692 vfs_setdirty(bp); 2693 2694retry: 2695 for (i = 0; i < bp->b_npages; i++) { 2696 vm_page_t m = bp->b_pages[i]; 2697 if (vm_page_sleep_busy(m, FALSE, "vbpage")) 2698 goto retry; 2699 } 2700 2701 bogus = 0; 2702 for (i = 0; i < bp->b_npages; i++) { 2703 vm_page_t m = bp->b_pages[i]; 2704 2705 vm_page_flag_clear(m, PG_ZERO); 2706 if ((bp->b_flags & B_CLUSTER) == 0) { 2707 vm_object_pip_add(obj, 1); 2708 vm_page_io_start(m); 2709 } 2710 2711 /* 2712 * When readying a buffer for a read ( i.e 2713 * clear_modify == 0 ), it is important to do 2714 * bogus_page replacement for valid pages in 2715 * partially instantiated buffers. Partially 2716 * instantiated buffers can, in turn, occur when 2717 * reconstituting a buffer from its VM backing store 2718 * base. We only have to do this if B_CACHE is 2719 * clear ( which causes the I/O to occur in the 2720 * first place ). The replacement prevents the read 2721 * I/O from overwriting potentially dirty VM-backed 2722 * pages. XXX bogus page replacement is, uh, bogus. 2723 * It may not work properly with small-block devices. 2724 * We need to find a better way. 2725 */ 2726 2727 vm_page_protect(m, VM_PROT_NONE); 2728 if (clear_modify) 2729 vfs_page_set_valid(bp, foff, i, m); 2730 else if (m->valid == VM_PAGE_BITS_ALL && 2731 (bp->b_flags & B_CACHE) == 0) { 2732 bp->b_pages[i] = bogus_page; 2733 bogus++; 2734 } 2735 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 2736 } 2737 if (bogus) 2738 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2739 } 2740} 2741 2742/* 2743 * Tell the VM system that the pages associated with this buffer 2744 * are clean. This is used for delayed writes where the data is 2745 * going to go to disk eventually without additional VM intevention. 2746 * 2747 * Note that while we only really need to clean through to b_bcount, we 2748 * just go ahead and clean through to b_bufsize. 2749 */ 2750static void 2751vfs_clean_pages(struct buf * bp) 2752{ 2753 int i; 2754 2755 if (bp->b_flags & B_VMIO) { 2756 vm_ooffset_t foff; 2757 2758 foff = bp->b_offset; 2759 KASSERT(bp->b_offset != NOOFFSET, 2760 ("vfs_clean_pages: no buffer offset")); 2761 for (i = 0; i < bp->b_npages; i++) { 2762 vm_page_t m = bp->b_pages[i]; 2763 vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK; 2764 vm_ooffset_t eoff = noff; 2765 2766 if (eoff > bp->b_offset + bp->b_bufsize) 2767 eoff = bp->b_offset + bp->b_bufsize; 2768 vfs_page_set_valid(bp, foff, i, m); 2769 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 2770 foff = noff; 2771 } 2772 } 2773} 2774 2775/* 2776 * vfs_bio_set_validclean: 2777 * 2778 * Set the range within the buffer to valid and clean. The range is 2779 * relative to the beginning of the buffer, b_offset. Note that b_offset 2780 * itself may be offset from the beginning of the first page. 
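/*
 * Illustrative arithmetic for vfs_bio_set_validclean() (hypothetical
 * numbers, 4K pages): for a buffer whose b_offset is 512 bytes into
 * its first page, the call
 *
 *	vfs_bio_set_validclean(bp, 3800, 1000);
 *
 * rebases 3800 to 4312 from the start of the first page, so the range
 * falls entirely within page index 1 and bytes 216..1215 of that page
 * are handed to vm_page_set_validclean().
 */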
2781 */ 2782 2783void 2784vfs_bio_set_validclean(struct buf *bp, int base, int size) 2785{ 2786 if (bp->b_flags & B_VMIO) { 2787 int i; 2788 int n; 2789 2790 /* 2791 * Fixup base to be relative to beginning of first page. 2792 * Set initial n to be the maximum number of bytes in the 2793 * first page that can be validated. 2794 */ 2795 2796 base += (bp->b_offset & PAGE_MASK); 2797 n = PAGE_SIZE - (base & PAGE_MASK); 2798 2799 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 2800 vm_page_t m = bp->b_pages[i]; 2801 2802 if (n > size) 2803 n = size; 2804 2805 vm_page_set_validclean(m, base & PAGE_MASK, n); 2806 base += n; 2807 size -= n; 2808 n = PAGE_SIZE; 2809 } 2810 } 2811} 2812 2813/* 2814 * vfs_bio_clrbuf: 2815 * 2816 * clear a buffer. This routine essentially fakes an I/O, so we need 2817 * to clear B_ERROR and B_INVAL. 2818 * 2819 * Note that while we only theoretically need to clear through b_bcount, 2820 * we go ahead and clear through b_bufsize. 2821 */ 2822 2823void 2824vfs_bio_clrbuf(struct buf *bp) { 2825 int i, mask = 0; 2826 caddr_t sa, ea; 2827 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) { 2828 bp->b_flags &= ~(B_INVAL|B_ERROR); 2829 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 2830 (bp->b_offset & PAGE_MASK) == 0) { 2831 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 2832 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) && 2833 ((bp->b_pages[0]->valid & mask) != mask)) { 2834 bzero(bp->b_data, bp->b_bufsize); 2835 } 2836 bp->b_pages[0]->valid |= mask; 2837 bp->b_resid = 0; 2838 return; 2839 } 2840 ea = sa = bp->b_data; 2841 for(i=0;i<bp->b_npages;i++,sa=ea) { 2842 int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE; 2843 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 2844 ea = (caddr_t)ulmin((u_long)ea, 2845 (u_long)bp->b_data + bp->b_bufsize); 2846 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 2847 if ((bp->b_pages[i]->valid & mask) == mask) 2848 continue; 2849 if ((bp->b_pages[i]->valid & mask) == 0) { 2850 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) { 2851 bzero(sa, ea - sa); 2852 } 2853 } else { 2854 for (; sa < ea; sa += DEV_BSIZE, j++) { 2855 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) && 2856 (bp->b_pages[i]->valid & (1<<j)) == 0) 2857 bzero(sa, DEV_BSIZE); 2858 } 2859 } 2860 bp->b_pages[i]->valid |= mask; 2861 vm_page_flag_clear(bp->b_pages[i], PG_ZERO); 2862 } 2863 bp->b_resid = 0; 2864 } else { 2865 clrbuf(bp); 2866 } 2867} 2868 2869/* 2870 * vm_hold_load_pages and vm_hold_free_pages get pages into 2871 * a buffer's address space. The pages are anonymous and are 2872 * not associated with a file object.
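/*
 * Illustrative arithmetic for the single-page fast path of
 * vfs_bio_clrbuf() above (hypothetical numbers, DEV_BSIZE 512): each
 * bit of m->valid covers one DEV_BSIZE chunk of the page, so a 1024
 * byte page-aligned buffer maps to
 *
 *	mask = (1 << (1024 / DEV_BSIZE)) - 1 = 0x3
 *
 * The data is bzero'ed only when those chunks are neither prezeroed
 * (PG_ZERO) nor already fully valid, and the two bits are set in
 * m->valid afterwards.
 */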
2873 */ 2874void 2875vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2876{ 2877 vm_offset_t pg; 2878 vm_page_t p; 2879 int index; 2880 2881 to = round_page(to); 2882 from = round_page(from); 2883 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2884 2885 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2886 2887tryagain: 2888 2889 p = vm_page_alloc(kernel_object, 2890 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 2891 VM_ALLOC_NORMAL); 2892 if (!p) { 2893 vm_pageout_deficit += (to - from) >> PAGE_SHIFT; 2894 VM_WAIT; 2895 goto tryagain; 2896 } 2897 vm_page_wire(p); 2898 p->valid = VM_PAGE_BITS_ALL; 2899 vm_page_flag_clear(p, PG_ZERO); 2900 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 2901 bp->b_pages[index] = p; 2902 vm_page_wakeup(p); 2903 } 2904 bp->b_npages = index; 2905} 2906 2907void 2908vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2909{ 2910 vm_offset_t pg; 2911 vm_page_t p; 2912 int index, newnpages; 2913 2914 from = round_page(from); 2915 to = round_page(to); 2916 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2917 2918 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2919 p = bp->b_pages[index]; 2920 if (p && (index < bp->b_npages)) { 2921#if !defined(MAX_PERF) 2922 if (p->busy) { 2923 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 2924 bp->b_blkno, bp->b_lblkno); 2925 } 2926#endif 2927 bp->b_pages[index] = NULL; 2928 pmap_kremove(pg); 2929 vm_page_busy(p); 2930 vm_page_unwire(p, 0); 2931 vm_page_free(p); 2932 } 2933 } 2934 bp->b_npages = newnpages; 2935} 2936 2937 2938#include "opt_ddb.h" 2939#ifdef DDB 2940#include <ddb/ddb.h> 2941 2942DB_SHOW_COMMAND(buffer, db_show_buffer) 2943{ 2944 /* get args */ 2945 struct buf *bp = (struct buf *)addr; 2946 2947 if (!have_addr) { 2948 db_printf("usage: show buffer <addr>\n"); 2949 return; 2950 } 2951 2952 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 2953 db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, " 2954 "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, " 2955 "b_blkno = %d, b_pblkno = %d\n", 2956 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 2957 major(bp->b_dev), minor(bp->b_dev), 2958 bp->b_data, bp->b_blkno, bp->b_pblkno); 2959 if (bp->b_npages) { 2960 int i; 2961 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 2962 for (i = 0; i < bp->b_npages; i++) { 2963 vm_page_t m; 2964 m = bp->b_pages[i]; 2965 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 2966 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 2967 if ((i + 1) < bp->b_npages) 2968 db_printf(","); 2969 } 2970 db_printf("\n"); 2971 } 2972} 2973#endif /* DDB */ 2974
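/*
 * Illustrative sketch (hypothetical helper and sizes): how allocbuf()
 * drives vm_hold_load_pages() and vm_hold_free_pages() above when a
 * non-VMIO buffer is resized.  Growing wires fresh anonymous pages
 * into the buffer's kva; shrinking unmaps and frees the tail pages.
 * allocbuf() also maintains b_bufsize and the space counters, which
 * this sketch omits.
 */
static void
example_resize_nonvmio(struct buf *bp)
{
	/* grow from 0 to 8192 bytes of anonymous memory */
	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data,
	    (vm_offset_t)bp->b_data + 8192);

	/* later, shrink back to 4096 bytes */
	vm_hold_free_pages(bp, (vm_offset_t)bp->b_data + 4096,
	    (vm_offset_t)bp->b_data + 8192);
}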