vfs_bio.c revision 48251
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.216 1999/06/26 02:46:06 mckusick Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct	bio_ops bioops;		/* I/O operation notification */

struct	buf *buf;		/* buffer header pool */
struct	swqueue bswlist;

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);
static int flushbufqueues(void);

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
int runningbufspace;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace, hibufspace;
static int needsbuffer;
static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
	&runningbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
	&hibufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
char *buf_wmesg = BUF_WMESG;

extern int vm_swap_size;

#define BUF_MAXUSE		24

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
#define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg */

/*
 *	kvaspacewakeup:
 *
 *	Called when kva space is potentially available for recovery or when
 *	kva space is recovered in the buffer_map.  This function wakes up
 *	anyone waiting for buffer_map kva space.  Even though the buffer_map
 *	is larger than maxbufspace, this situation will typically occur
 *	when the buffer_map gets fragmented.
 */

static __inline void
kvaspacewakeup(void)
{
	/*
	 * If someone is waiting for KVA space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
		wakeup(&needsbuffer);
	}
}
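/*
 * Sketch of the consumer side of the needsbuffer protocol (illustrative;
 * the real code is in getnewbuf() below): a would-be allocator publishes
 * the resource it is waiting for and sleeps on &needsbuffer, and the
 * wakeup routines here clear the bit and wake it:
 *
 *	needsbuffer |= VFS_BIO_NEED_KVASPACE;
 *	while (needsbuffer & VFS_BIO_NEED_KVASPACE) {
 *		if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
 *		    "newbuf", slptimeo))
 *			break;		-- timed out or interrupted
 *	}
 */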
/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery or when
 *	buffer space is recovered.  getnewbuf() will block on this flag when
 *	it is unable to free sufficient buffer space.  Buffer space becomes
 *	recoverable when bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{
	++numfreebuffers;
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
}

/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}


/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);
	simple_lock_init(&buftimelock);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}

	/*
	 * maxbufspace is currently calculated to support all filesystem
	 * blocks to be 8K.  If you happen to use a 16K filesystem, the size
	 * of the buffer cache is still the same as it would be for 8K
	 * filesystems.  This keeps the size of the buffer cache "in check"
	 * for big block filesystems.
	 *
	 * maxbufspace is calculated as around 50% of the KVA available in
	 * the buffer_map ( DFLTSIZE vs BKVASIZE ), I presume to reduce the
	 * effect of fragmentation.
	 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
		hibufspace = 3 * maxbufspace / 4;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * hibufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	lodirtybuffers = nbuf / 16 + 10;
	hidirtybuffers = nbuf / 8 + 20;
	numdirtybuffers = 0;

/*
 * Try to keep the number of free buffers in the specified range,
 * and give the syncer access to an emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize) {
		vm_map_delete(buffer_map,
		    (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
		);
		bp->b_kvasize = 0;
		kvaspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();
	int old_qindex = bp->b_qindex;

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		if (BUF_REFCNT(bp) == 1)
			TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		else if (BUF_REFCNT(bp) == 0)
			panic("bremfree: not locked");
		else
			/* Temporary panic to verify exclusive locking */
			/* This panic goes away when we allow shared refs */
			panic("bremfree: multiple refs");
		bp->b_qindex = QUEUE_NONE;
		runningbufspace += bp->b_bufsize;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
	 * the buffer was free and we must decrement numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		switch(old_qindex) {
		case QUEUE_EMPTY:
		case QUEUE_LRU:
		case QUEUE_AGE:
			--numfreebuffers;
			break;
		default:
			break;
		}
	}
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		KASSERT(!(bp->b_flags & B_ASYNC),
		    ("bread: illegal async bp %p", bp));
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (biowait(bp));
	}
	return (0);
}
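/*
 * Example (sketch): the canonical bread()/brelse() pairing as seen from
 * filesystem code; vp, lblkno and bsize stand in for the caller's values:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(use bp->b_data for bsize bytes)
 *	brelse(bp);	-- or bdwrite(bp)/bawrite(bp) if modified
 */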
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
 * to initiating I/O.  If B_CACHE is set, the buffer is valid
 * and we do not have to do anything.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			/* hand the async read-ahead buffer off to the kernel */
			BUF_KERNPROC(rabp);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct vnode *vp;
	struct mount *mp;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

#if !defined(MAX_PERF)
	if (BUF_REFCNT(bp) == 0)
		panic("bwrite: buffer is not busy???");
#endif
	s = splbio();
	bundirty(bp);

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	bp->b_flags |= B_WRITEINPROG | B_CACHE;

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	BUF_KERNPROC(bp);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if ((oldflags & B_ASYNC) == 0)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);
		brelse(bp);
		return (rtval);
	}

	return (0);
}
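/*
 * Example (sketch): the delayed-write path a filesystem typically takes
 * when it modifies a metadata block; vp, lblkno and bsize stand in for
 * the caller's values:
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		(modify bp->b_data)
 *		bdwrite(bp);	-- mark dirty and release; written later
 *	} else
 *		brelse(bp);
 */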
/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf * bp)
{
	struct vnode *vp;

#if !defined(MAX_PERF)
	if (BUF_REFCNT(bp) == 0)
		panic("bdwrite: buffer is not busy");
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * XXX The soft dependency code is not prepared to
	 * have I/O done when a bdwrite is requested.  For
	 * now we just let the write be delayed if it is
	 * requested by the soft dependency code.
	 */
	if ((vp = bp->b_vp) &&
	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
	    (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
		return;

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);
}

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear B_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_READ|B_RELBUF);

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		--numdirtybuffers;
	}
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp->b_vp, bp);
}

/*
 *	bowrite:
 *
 *	Ordered write.  Start output on a buffer, and flag it so that the
 *	device will write it in the order it was queued.  The buffer is
 *	released when the output completes.  bwrite() ( or the VOP routine
 *	anyway ) is responsible for handling B_INVAL buffers.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED | B_ASYNC;
	return (VOP_BWRITE(bp->b_vp, bp));
}
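/*
 * Example (sketch): bawrite() is the fire-and-forget variant used by
 * fsync-style loops once a dirty buffer has been locked and taken off
 * its queue; biodone() releases the buffer when the write finishes:
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *		bremfree(bp);
 *		bawrite(bp);	-- released by biodone() on completion
 *	}
 */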
/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf * bp)
{
	int s;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

#if 0
	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp, NULL);
		return;
	}
#endif

	s = splbio();

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
		/*
		 * Failed write, redirty.  Must clear B_ERROR to prevent
		 * pages from being scrapped.  Note: B_INVAL is ignored
		 * here but will presumably be dealt with later.
		 */
		bp->b_flags &= ~B_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
	    (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */

	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  B_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * for block sizes that are less than PAGE_SIZE, the b_data
		 * base of the buffer does not represent exactly b_offset and
		 * neither b_offset nor b_size are necessarily page aligned.
		 * Instead, the starting position of b_offset is:
		 *
		 * 	b_data + (b_offset & PAGE_MASK)
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;
				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif
	if (BUF_REFCNT(bp) > 1) {
		/* Temporary panic to verify exclusive locking */
		/* This panic goes away when we allow shared refs */
		panic("brelse: multiple refs");
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}

	/* enqueue */

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;
		if (bp->b_kvasize)
			kvaspacewakeup();
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/*
	 * If B_INVAL, clear B_DELWRI.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
		bp->b_flags &= ~B_DELWRI;
		--numdirtybuffers;
	}

	runningbufspace -= bp->b_bufsize;

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free.
	 */

	if (bp->b_bufsize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
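/*
 * Sketch of the release-side decision a caller faces (illustrative; the
 * real policy lives in the routines themselves): brelse() when the buffer
 * may need to be invalidated or torn down, bqrelse() for a cheap requeue
 * when the contents remain good:
 *
 *	if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF))
 *		brelse(bp);	-- full rundown path
 *	else
 *		bqrelse(bp);	-- requeue, keep B_DELWRI data intact
 */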
/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		panic("bqrelse: multiple refs");
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	runningbufspace -= bp->b_bufsize;

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
	) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe wakeup
	 */
	if (bp->b_bufsize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i, s;
	vm_page_t m;

	s = splvm();
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	runningbufspace -= bp->b_bufsize;
	splx(s);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	if (bp->b_bufsize)
		bufspacewakeup();
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}
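/*
 * Note: gbincore() takes no locks of its own; callers in this file are
 * already at splbio() (see getblk() below) or bracket the call themselves,
 * as incore() does:
 *
 *	s = splbio();
 *	bp = gbincore(vp, blkno);
 *	splx(s);
 */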
/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files, and
	 * then only if our I/O system is not saturated.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}

	BUF_LOCK(bp, LK_EXCLUSIVE);
	bremfree(bp);
	bp->b_flags |= B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp->b_vp, bp);

	return nwritten;
}

/*
 *	getnewbuf:
 *
 *	Find and initialize a new buffer header, freeing up existing buffers
 *	in the bufqueues as necessary.  The new buffer is returned locked.
 *
 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
 *	buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 *	We block if:
 *		We have insufficient buffer headers
 *		We have insufficient buffer space
 *		buffer_map is too fragmented ( space reservation fails )
 *
 *	We do *not* attempt to flush dirty buffers more than one level deep.
 *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
 *
 *	If P_FLSINPROG is set, we are allowed to dip into our emergency
 *	reserve.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
	int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	struct buf *nbp;
	struct buf *dbp;
	int outofspace;
	int nqindex;
	int defrag = 0;
	static int newbufcnt = 0;
	int lastnewbuf = newbufcnt;

restart:
	/*
	 * Calculate whether we are out of buffer space.  This state is
	 * recalculated on every restart.  If we are out of space, we
	 * have to turn off defragmentation.  The outofspace code will
	 * defragment too, but the looping conditionals will be messed up
	 * if both outofspace and defrag are on.
	 */

	dbp = NULL;
	outofspace = 0;
	if (bufspace >= hibufspace) {
		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
		    bufspace >= maxbufspace
		) {
			outofspace = 1;
			defrag = 0;
		}
	}

	/*
	 * defrag state is semi-persistent.  1 means we are flagged for
	 * defragging.  -1 means we actually defragged something.
	 */
	/* nop */

	/*
	 * Setup for scan.  If we do not have enough free buffers,
	 * we setup a degenerate case that falls through the while.
	 *
	 * If we are in the middle of a flush, we can dip into the
	 * emergency reserve.
	 *
	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
	 * because those buffers are, well, empty.
	 */

	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
	    numfreebuffers < lofreebuffers) {
		nqindex = QUEUE_LRU;
		nbp = NULL;
	} else {
		nqindex = QUEUE_EMPTY;
		if (outofspace ||
		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL) {
			nqindex = QUEUE_AGE;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			if (nbp == NULL) {
				nqindex = QUEUE_LRU;
				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
			}
		}
	}

	/*
	 * Run scan, possibly freeing data and/or kva mappings on the fly
	 * depending.
	 */

	while ((bp = nbp) != NULL) {
		int qindex = nqindex;
		/*
		 * Calculate next bp ( we can only use it if we do not block
		 * or do other fancy things ).
		 */
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
			switch(qindex) {
			case QUEUE_EMPTY:
				nqindex = QUEUE_AGE;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
					break;
				/* fall through */
			case QUEUE_AGE:
				nqindex = QUEUE_LRU;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
					break;
				/* fall through */
			case QUEUE_LRU:
				/*
				 * nbp is NULL.
				 */
				break;
			}
		}

		/*
		 * Sanity Checks
		 */
		KASSERT(BUF_REFCNT(bp) == 0,
		    ("getnewbuf: busy buffer %p on free list", bp));
		KASSERT(bp->b_qindex == qindex,
		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));

		/*
		 * Here we try to move NON VMIO buffers to the end of the
		 * LRU queue in order to make VMIO buffers more readily
		 * freeable.  We also try to move buffers with a positive
		 * usecount to the end.
		 *
		 * Note that by moving the bp to the end, we setup a following
		 * loop.  Since we continue to decrement b_usecount this
		 * is ok and, in fact, desirable.
		 *
		 * If we are at the end of the list, we move ourself to the
		 * same place and need to fixup nbp and nqindex to handle
		 * the following case.
		 */

		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (vmiospace < maxvmiobufspace)
			) {
				--bp->b_usecount;
				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				if (nbp == NULL) {
					nqindex = qindex;
					nbp = bp;
				}
				continue;
			}
		}

		/*
		 * If we come across a delayed write and numdirtybuffers should
		 * be flushed, try to write it out.  Only if P_FLSINPROG is
		 * not set.  We can't afford to recursively stack more than
		 * one deep due to the possibility of having deep VFS call
		 * stacks.
		 *
		 * Limit the number of dirty buffers we are willing to try
		 * to recover since it really isn't our job here.
		 */
		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
			/*
			 * This is rather complex, but necessary.  If we come
			 * across a B_DELWRI buffer we have to flush it in
			 * order to use it.  We only do this if we absolutely
			 * need to.  We must also protect against too much
			 * recursion which might run us out of stack due to
			 * deep VFS call stacks.
			 *
			 * In heavy-writing situations, QUEUE_LRU can contain
			 * a large number of DELWRI buffers at its head.  These
			 * buffers must be moved to the tail if they cannot be
			 * written async in order to reduce the scanning time
			 * required to skip past these buffers in later
			 * getnewbuf() calls.
			 */
			if ((curproc->p_flag & P_FLSINPROG) ||
			    numdirtybuffers < hidirtybuffers) {
				if (qindex == QUEUE_LRU) {
					/*
					 * dbp prevents us from looping forever
					 * if all bps in QUEUE_LRU are dirty.
					 */
					if (bp == dbp) {
						bp = NULL;
						break;
					}
					if (dbp == NULL)
						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
				continue;
			}
			curproc->p_flag |= P_FLSINPROG;
			vfs_bio_awrite(bp);
			curproc->p_flag &= ~P_FLSINPROG;
			goto restart;
		}

		if (defrag > 0 && bp->b_kvasize == 0)
			continue;
		if (outofspace > 0 && bp->b_bufsize == 0)
			continue;

		/*
		 * Start freeing the bp.  This is somewhat involved.  nbp
		 * remains valid only for QUEUE_EMPTY bp's.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			panic("getnewbuf: locked buf");
		bremfree(bp);

		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);
			}
			if (bp->b_vp)
				brelvp(bp);
		}

		/*
		 * NOTE:  nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */

		if (bp->b_rcred != NOCRED) {
			crfree(bp->b_rcred);
			bp->b_rcred = NOCRED;
		}
		if (bp->b_wcred != NOCRED) {
			crfree(bp->b_wcred);
			bp->b_wcred = NOCRED;
		}
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);

		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);

		if (bp->b_bufsize)
			allocbuf(bp, 0);

		bp->b_flags = 0;
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_offset = NOOFFSET;
		bp->b_iodone = 0;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		bp->b_npages = 0;
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_usecount = 5;

		LIST_INIT(&bp->b_dep);

		/*
		 * Ok, now that we have a free buffer, if we are defragging
		 * we have to recover the kvaspace.
		 */

		if (defrag > 0) {
			defrag = -1;
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			goto restart;
		}

		if (outofspace > 0) {
			outofspace = -1;
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			goto restart;
		}

		/*
		 * We are done
		 */
		break;
	}

	/*
	 * If we exhausted our list, sleep as appropriate.
	 */

	if (bp == NULL) {
		int flags;

dosleep:
		if (defrag > 0)
			flags = VFS_BIO_NEED_KVASPACE;
		else if (outofspace > 0)
			flags = VFS_BIO_NEED_BUFSPACE;
		else
			flags = VFS_BIO_NEED_ANY;

		(void) speedup_syncer();
		needsbuffer |= flags;
		while (needsbuffer & flags) {
			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
			    "newbuf", slptimeo))
				return (NULL);
		}
	} else {
		/*
		 * We finally have a valid bp.  We aren't quite out of the
		 * woods, we still have to reserve kva space.
		 */
		vm_offset_t addr = 0;

		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

		if (maxsize != bp->b_kvasize) {
			bfreekva(bp);

			if (vm_map_findspace(buffer_map,
			    vm_map_min(buffer_map), maxsize, &addr)
			) {
				/*
				 * Uh oh.  Buffer map is too fragmented.  Try
				 * to defragment.
				 */
				if (defrag <= 0) {
					defrag = 1;
					bp->b_flags |= B_INVAL;
					brelse(bp);
					goto restart;
				}
				/*
				 * Uh oh.  We couldn't seem to defragment
				 */
				bp = NULL;
				goto dosleep;
			}
		}
		if (addr) {
			vm_map_insert(buffer_map, NULL, 0,
			    addr, addr + maxsize,
			    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

			bp->b_kvabase = (caddr_t) addr;
			bp->b_kvasize = maxsize;
		}
		bp->b_data = bp->b_kvabase;
	}

	/*
	 * If we have slept at some point in this process and another
	 * process has managed to allocate a new buffer while we slept,
	 * we have to return NULL so that our caller can recheck to
	 * ensure that the other process did not create an identically
	 * identified buffer to the one we were requesting.  We make this
	 * check by incrementing the static int newbufcnt each time we
	 * successfully allocate a new buffer.  By saving the value of
	 * newbufcnt in our local lastnewbuf, we can compare newbufcnt
	 * with lastnewbuf to see if any other process managed to
	 * allocate a buffer while we were doing so ourselves.
	 *
	 * Note that bp, if valid, is locked.
	 */
	if (lastnewbuf == newbufcnt) {
		/*
		 * No buffers allocated, so we can return one if we were
		 * successful, or continue trying if we were not successful.
		 */
		if (bp != NULL) {
			newbufcnt += 1;
			return (bp);
		}
		goto restart;
	}
	/*
	 * Another process allocated a buffer since we were called, so
	 * we have to free the one we allocated and return NULL to let
	 * our caller recheck to see if a new buffer is still needed.
	 */
	if (bp != NULL) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
	}
	return (NULL);
}

/*
 *	waitfreebuffers:
 *
 *	Wait for sufficient free buffers.  This routine is not called if
 *	curproc is the update process so we do not have to do anything
 *	fancy.
 */

static void
waitfreebuffers(int slpflag, int slptimeo)
{
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers >= hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
			break;
	}
}
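/*
 * Sketch (illustrative): every entry point that may start flushing dirty
 * buffers brackets the work with P_FLSINPROG, which is what keeps
 * getnewbuf()'s flush-on-demand from recursing through deep VFS call
 * stacks:
 *
 *	if ((curproc->p_flag & P_FLSINPROG) == 0) {
 *		curproc->p_flag |= P_FLSINPROG;
 *		(flush some buffers, e.g. vfs_bio_awrite(bp))
 *		curproc->p_flag &= ~P_FLSINPROG;
 *	}
 */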
/*
 *	flushdirtybuffers:
 *
 *	This routine is called when we get too many dirty buffers.
 *
 *	We have to protect ourselves from recursion, but we also do not want
 *	other process's flushdirtybuffers() to interfere with the syncer if
 *	it decides to flushdirtybuffers().
 *
 *	In order to maximize operations, we allow any process to flush
 *	dirty buffers and use P_FLSINPROG to prevent recursion.
 */

static void
flushdirtybuffers(int slpflag, int slptimeo)
{
	int s;

	s = splbio();

	if (curproc->p_flag & P_FLSINPROG) {
		splx(s);
		return;
	}
	curproc->p_flag |= P_FLSINPROG;

	while (numdirtybuffers > lodirtybuffers) {
		if (flushbufqueues() == 0)
			break;
	}

	curproc->p_flag &= ~P_FLSINPROG;

	splx(s);
}

static int
flushbufqueues(void)
{
	struct buf *bp;
	int qindex;
	int r = 0;

	qindex = QUEUE_AGE;
	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);

	for (;;) {
		if (bp == NULL) {
			if (qindex == QUEUE_LRU)
				break;
			qindex = QUEUE_LRU;
			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
				break;
		}

		/*
		 * Try to free up B_INVAL delayed-write buffers rather than
		 * writing them out.  Note also that NFS is somewhat sensitive
		 * to B_INVAL buffers so it is doubly important that we do
		 * this.
		 */
		if ((bp->b_flags & B_DELWRI) != 0) {
			if (bp->b_flags & B_INVAL) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
					panic("flushbufqueues: locked buf");
				bremfree(bp);
				brelse(bp);
			} else {
				vfs_bio_awrite(bp);
			}
			++r;
			break;
		}
		bp = TAILQ_NEXT(bp, b_freelist);
	}
	return (r);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc, size;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	size = PAGE_SIZE;
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		tinc = size;
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}
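/*
 * Example (sketch): breadn() above is the main consumer of inmem(); it
 * skips read-ahead blocks whose data is already resident:
 *
 *	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
 *		if (inmem(vp, *rablkno))
 *			continue;	-- no I/O needed for this block
 *		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
 *		...
 *	}
 */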
/*
 *	vfs_setdirty:
 *
 *	Sets the dirty range for a buffer based on the status of the dirty
 *	bits in the pages comprising the buffer.
 *
 *	The range is limited to the size of the buffer.
 *
 *	This routine is primarily used by NFS, but is generalized for the
 *	B_VMIO case.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;

	/*
	 * Degenerate case - empty buffer
	 */

	if (bp->b_bufsize == 0)
		return;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	object = bp->b_pages[0]->object;

	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p writeable but not mightbedirty\n", object);
	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p mightbedirty but not writeable\n", object);

	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
		vm_offset_t boffset;
		vm_offset_t eoffset;

		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_pages[i]);
		}

		/*
		 * Calculate the encompassing dirty range, boffset and eoffset,
		 * (eoffset - boffset) bytes.
		 */

		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty)
				break;
		}
		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		/*
		 * Fit it to the buffer.
		 */

		if (eoffset > bp->b_bcount)
			eoffset = bp->b_bcount;

		/*
		 * If we have a good dirty range, merge with the existing
		 * dirty range.
		 */

		if (boffset < eoffset) {
			if (bp->b_dirtyoff > boffset)
				bp->b_dirtyoff = boffset;
			if (bp->b_dirtyend < eoffset)
				bp->b_dirtyend = eoffset;
		}
	}
}
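/*
 * Worked example for the range math above (assuming PAGE_SIZE == 4096 and
 * a page-aligned b_offset): for an 8K buffer spanning pages 0 and 1 where
 * only page 1 has dirty bits, the forward scan stops at i == 1 and the
 * backward scan also stops at i == 1, so
 *
 *	boffset = 1 << PAGE_SHIFT = 4096
 *	eoffset = (1 + 1) << PAGE_SHIFT = 8192	(then clipped to b_bcount)
 *
 * giving a dirty range of [4096, 8192) to merge into b_dirtyoff/b_dirtyend.
 */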
/*
 *	getblk:
 *
 *	Get a block given a specified block and offset into a file/device.
 *	The buffers B_DONE bit will be cleared on return, making it almost
 *	ready for an I/O initiation.  B_INVAL may or may not be set on
 *	return.  The caller should clear B_INVAL prior to initiating a
 *	READ.
 *
 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 *	an existing buffer.
 *
 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 *	and then cleared based on the backing VM.  If the previous buffer is
 *	non-0-sized but invalid, B_CACHE will be cleared.
 *
 *	If getblk() must create a new buffer, the new buffer is returned with
 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 *	case it is returned with B_INVAL clear and B_CACHE set based on the
 *	backing VM.
 *
 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
 *	B_CACHE bit is clear.
 *
 *	What this means, basically, is that the caller should use B_CACHE to
 *	determine whether the buffer is fully valid or not and should clear
 *	B_INVAL prior to issuing a read.  If the caller intends to validate
 *	the buffer by loading its data area with something, the caller needs
 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
 *	the caller should set B_CACHE ( as an optimization ), else the caller
 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
 *	a write attempt or if it was a successful read.  If the caller
 *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	/*
	 * Block if we are low on buffers.  The syncer is allowed more
	 * buffers in order to avoid a deadlock.
	 */
	if (curproc == updateproc && numfreebuffers == 0) {
		needsbuffer |= VFS_BIO_NEED_ANY;
		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
		    slptimeo);
	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		/*
		 * Buffer is in-core
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "getblk", slpflag, slptimeo) == ENOLCK)
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}

		/*
		 * The buffer is locked.  B_CACHE is cleared if the buffer is
		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
		 * and for a VMIO buffer B_CACHE is adjusted according to the
		 * backing VM cache.
		 */
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
			bp->b_flags |= B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies for non-VMIO case.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)
			) {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					VOP_BWRITE(bp->b_vp, bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						VOP_BWRITE(bp->b_vp, bp);
					}
				}
				goto loop;
			}
		}

		/*
		 * If the size is inconsistent in the VMIO case, we can resize
		 * the buffer.  This might lead to B_CACHE getting set or
		 * cleared.  If the size has not changed, B_CACHE remains
		 * unchanged from its previous state.
		 */

		if (bp->b_bcount != size)
			allocbuf(bp, size);

		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));

		/*
		 * A buffer with B_DELWRI set and B_CACHE clear must
		 * be committed before we can return the buffer in
		 * order to prevent the caller from issuing a read
		 * ( due to B_CACHE not being set ) and overwriting
		 * it.
		 *
		 * Most callers, including NFS and FFS, need this to
		 * operate properly either because they assume they
		 * can issue a read if B_CACHE is not set, or because
		 * ( for example ) an uncached B_DELWRI might loop due
		 * to softupdates re-dirtying the buffer.  In the latter
		 * case, B_CACHE is set after the first write completes,
		 * preventing further loops.
		 */

		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			VOP_BWRITE(bp->b_vp, bp);
			goto loop;
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		bp->b_flags &= ~B_DONE;
	} else {
		/*
		 * Buffer is not in-core, create new buffer.  The buffer
		 * returned by getnewbuf() is locked.  Note that the returned
		 * buffer is also considered valid (not marked B_INVAL).
		 */
		int bsize, maxsize, vmio;
		off_t offset;

		if (vp->v_type == VBLK)
			bsize = DEV_BSIZE;
		else if (vp->v_mountedhere)
			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
		else if (vp->v_mount)
			bsize = vp->v_mount->mnt_stat.f_iosize;
		else
			bsize = size;

		offset = (off_t)blkno * bsize;
		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
		maxsize = imax(maxsize, bsize);

		if ((bp = getnewbuf(vp, blkno,
		    slpflag, slptimeo, size, maxsize)) == NULL) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * This can be a problem whether the vnode is locked or not.
		 * If the buffer is created out from under us, we have to
		 * throw away the one we just created.  There is no window
		 * race because we are safely running at splbio() from the
		 * point of the duplicate buffer creation through to here.
		 */
		if (gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		/*
		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
		 * buffer size starts out as 0, B_CACHE will be set by
		 * allocbuf() for the VMIO case prior to it testing the
		 * backing store for validity.
		 */

		if (vmio) {
			bp->b_flags |= B_VMIO;
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		splx(s);
		bp->b_flags &= ~B_DONE;
	}
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.  The buffer is initially
 * set to B_INVAL.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0)
		;
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}
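/*
 * Note on an idiom used throughout this file: allocbuf() is also the
 * release path for a buffer's memory; brelse() and getnewbuf() shrink a
 * buffer to nothing before recycling its header:
 *
 *	if (bp->b_bufsize)
 *		allocbuf(bp, 0);	-- give back pages/malloced space
 */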
2075 */
2076
2077int
2078allocbuf(struct buf *bp, int size)
2079{
2080 int newbsize, mbsize;
2081 int i;
2082
2083#if !defined(MAX_PERF)
2084 if (BUF_REFCNT(bp) == 0)
2085 panic("allocbuf: buffer not busy");
2086
2087 if (bp->b_kvasize < size)
2088 panic("allocbuf: buffer too small");
2089#endif
2090
2091 if ((bp->b_flags & B_VMIO) == 0) {
2092 caddr_t origbuf;
2093 int origbufsize;
2094 /*
2095 * Just get anonymous memory from the kernel. Don't
2096 * mess with B_CACHE.
2097 */
2098 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2099#if !defined(NO_B_MALLOC)
2100 if (bp->b_flags & B_MALLOC)
2101 newbsize = mbsize;
2102 else
2103#endif
2104 newbsize = round_page(size);
2105
2106 if (newbsize < bp->b_bufsize) {
2107#if !defined(NO_B_MALLOC)
2108 /*
2109 * malloced buffers are not shrunk
2110 */
2111 if (bp->b_flags & B_MALLOC) {
2112 if (newbsize) {
2113 bp->b_bcount = size;
2114 } else {
2115 free(bp->b_data, M_BIOBUF);
2116 bufspace -= bp->b_bufsize;
2117 bufmallocspace -= bp->b_bufsize;
2118 runningbufspace -= bp->b_bufsize;
2119 if (bp->b_bufsize)
2120 bufspacewakeup();
2121 bp->b_data = bp->b_kvabase;
2122 bp->b_bufsize = 0;
2123 bp->b_bcount = 0;
2124 bp->b_flags &= ~B_MALLOC;
2125 }
2126 return 1;
2127 }
2128#endif
2129 vm_hold_free_pages(
2130 bp,
2131 (vm_offset_t) bp->b_data + newbsize,
2132 (vm_offset_t) bp->b_data + bp->b_bufsize);
2133 } else if (newbsize > bp->b_bufsize) {
2134#if !defined(NO_B_MALLOC)
2135 /*
2136 * We only use malloced memory on the first allocation,
2137 * and revert to page-allocated memory when the buffer grows.
2138 */
2139 if ( (bufmallocspace < maxbufmallocspace) &&
2140 (bp->b_bufsize == 0) &&
2141 (mbsize <= PAGE_SIZE/2)) {
2142
2143 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2144 bp->b_bufsize = mbsize;
2145 bp->b_bcount = size;
2146 bp->b_flags |= B_MALLOC;
2147 bufspace += mbsize;
2148 bufmallocspace += mbsize;
2149 runningbufspace += bp->b_bufsize;
2150 return 1;
2151 }
2152#endif
2153 origbuf = NULL;
2154 origbufsize = 0;
2155#if !defined(NO_B_MALLOC)
2156 /*
2157 * If the buffer is growing on its other-than-first allocation,
2158 * then we revert to the page-allocation scheme.
2159 */
2160 if (bp->b_flags & B_MALLOC) {
2161 origbuf = bp->b_data;
2162 origbufsize = bp->b_bufsize;
2163 bp->b_data = bp->b_kvabase;
2164 bufspace -= bp->b_bufsize;
2165 bufmallocspace -= bp->b_bufsize;
2166 runningbufspace -= bp->b_bufsize;
2167 if (bp->b_bufsize)
2168 bufspacewakeup();
2169 bp->b_bufsize = 0;
2170 bp->b_flags &= ~B_MALLOC;
2171 newbsize = round_page(newbsize);
2172 }
2173#endif
2174 vm_hold_load_pages(
2175 bp,
2176 (vm_offset_t) bp->b_data + bp->b_bufsize,
2177 (vm_offset_t) bp->b_data + newbsize);
2178#if !defined(NO_B_MALLOC)
2179 if (origbuf) {
2180 bcopy(origbuf, bp->b_data, origbufsize);
2181 free(origbuf, M_BIOBUF);
2182 }
2183#endif
2184 }
2185 } else {
2186 vm_page_t m;
2187 int desiredpages;
2188
2189 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2190 desiredpages = (size == 0) ? 0 :
2191 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2192
2193#if !defined(NO_B_MALLOC)
2194 if (bp->b_flags & B_MALLOC)
2195 panic("allocbuf: VMIO buffer can't be malloced");
2196#endif
2197 /*
2198 * Set B_CACHE initially if buffer is 0 length or will become
2199 * 0-length.
2200 */
2201 if (size == 0 || bp->b_bufsize == 0)
2202 bp->b_flags |= B_CACHE;
2203
2204 if (newbsize < bp->b_bufsize) {
2205 /*
2206 * DEV_BSIZE aligned new buffer size is less than the
2207 * DEV_BSIZE aligned existing buffer size.
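 * For example ( assuming 4K pages and a page-aligned b_offset ),
 * shrinking a 16384-byte VMIO buffer to 4096 bytes leaves
 * desiredpages at 1 while b_npages is still 4, so three excess
 * pages must be unwired and unmapped below.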
 * Figure out
2208 * if we have to remove any pages.
2209 */
2210 if (desiredpages < bp->b_npages) {
2211 for (i = desiredpages; i < bp->b_npages; i++) {
2212 /*
2213 * the page is not freed here -- it
2214 * is the responsibility of
2215 * vnode_pager_setsize
2216 */
2217 m = bp->b_pages[i];
2218 KASSERT(m != bogus_page,
2219 ("allocbuf: bogus page found"));
2220 while (vm_page_sleep_busy(m, TRUE, "biodep"))
2221 ;
2222
2223 bp->b_pages[i] = NULL;
2224 vm_page_unwire(m, 0);
2225 }
2226 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2227 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2228 bp->b_npages = desiredpages;
2229 }
2230 } else if (size > bp->b_bcount) {
2231 /*
2232 * We are growing the buffer, possibly in a
2233 * byte-granular fashion.
2234 */
2235 struct vnode *vp;
2236 vm_object_t obj;
2237 vm_offset_t toff;
2238 vm_offset_t tinc;
2239
2240 /*
2241 * Step 1, bring in the VM pages from the object,
2242 * allocating them if necessary. We must clear
2243 * B_CACHE if these pages are not valid for the
2244 * range covered by the buffer.
2245 */
2246
2247 vp = bp->b_vp;
2248 obj = vp->v_object;
2249
2250 while (bp->b_npages < desiredpages) {
2251 vm_page_t m;
2252 vm_pindex_t pi;
2253
2254 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2255 if ((m = vm_page_lookup(obj, pi)) == NULL) {
2256 m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2257 if (m == NULL) {
2258 VM_WAIT;
2259 vm_pageout_deficit += desiredpages - bp->b_npages;
2260 } else {
2261 vm_page_wire(m);
2262 vm_page_wakeup(m);
2263 bp->b_flags &= ~B_CACHE;
2264 bp->b_pages[bp->b_npages] = m;
2265 ++bp->b_npages;
2266 }
2267 continue;
2268 }
2269
2270 /*
2271 * We found a page. If we have to sleep on it,
2272 * retry because it might have gotten freed out
2273 * from under us.
2274 *
2275 * We can only test PG_BUSY here. Blocking on
2276 * m->busy might lead to a deadlock:
2277 *
2278 * vm_fault->getpages->cluster_read->allocbuf
2279 *
2280 */
2281
2282 if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2283 continue;
2284
2285 /*
2286 * We have a good page. Should we wakeup the
2287 * page daemon?
2288 */
2289 if ((curproc != pageproc) &&
2290 ((m->queue - m->pc) == PQ_CACHE) &&
2291 ((cnt.v_free_count + cnt.v_cache_count) <
2292 (cnt.v_free_min + cnt.v_cache_min))
2293 ) {
2294 pagedaemon_wakeup();
2295 }
2296 vm_page_flag_clear(m, PG_ZERO);
2297 vm_page_wire(m);
2298 bp->b_pages[bp->b_npages] = m;
2299 ++bp->b_npages;
2300 }
2301
2302 /*
2303 * Step 2. We've loaded the pages into the buffer,
2304 * we have to figure out if we can still have B_CACHE
2305 * set. Note that B_CACHE is set according to the
2306 * byte-granular range ( bcount and size ), not the
2307 * aligned range ( newbsize ).
2308 *
2309 * The VM test is against m->valid, which is DEV_BSIZE
2310 * aligned. Needless to say, the validity of the data
2311 * needs to also be DEV_BSIZE aligned. Note that this
2312 * fails with NFS if the server or some other client
2313 * extends the file's EOF. If our buffer is resized,
2314 * B_CACHE may remain set!
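 * ( e.g. another NFS client appends to the file: the VM pages
 * backing our old, shorter buffer still test valid, so the
 * resized buffer keeps B_CACHE even though its new tail was
 * never actually read )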
 * XXX
2315 */
2316
2317 toff = bp->b_bcount;
2318 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2319
2320 while ((bp->b_flags & B_CACHE) && toff < size) {
2321 vm_pindex_t pi;
2322
2323 if (tinc > (size - toff))
2324 tinc = size - toff;
2325
2326 pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2327 PAGE_SHIFT;
2328
2329 vfs_buf_test_cache(
2330 bp,
2331 bp->b_offset,
2332 toff,
2333 tinc,
2334 bp->b_pages[pi]
2335 );
2336 toff += tinc;
2337 tinc = PAGE_SIZE;
2338 }
2339
2340 /*
2341 * Step 3, fixup the KVM pmap. Remember that
2342 * bp->b_data is relative to bp->b_offset, but
2343 * bp->b_offset may be offset into the first page.
2344 */
2345
2346 bp->b_data = (caddr_t)
2347 trunc_page((vm_offset_t)bp->b_data);
2348 pmap_qenter(
2349 (vm_offset_t)bp->b_data,
2350 bp->b_pages,
2351 bp->b_npages
2352 );
2353 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2354 (vm_offset_t)(bp->b_offset & PAGE_MASK));
2355 }
2356 }
2357 if (bp->b_flags & B_VMIO)
2358 vmiospace += (newbsize - bp->b_bufsize);
2359 bufspace += (newbsize - bp->b_bufsize);
2360 runningbufspace += (newbsize - bp->b_bufsize);
2361 if (newbsize < bp->b_bufsize)
2362 bufspacewakeup();
2363 bp->b_bufsize = newbsize; /* actual buffer allocation */
2364 bp->b_bcount = size; /* requested buffer size */
2365 return 1;
2366}
2367
2368/*
2369 * biowait:
2370 *
2371 * Wait for buffer I/O completion, returning error status. The buffer
2372 * is left locked and B_DONE on return. B_EINTR is converted into an
2373 * EINTR error and cleared.
2374 */
2375int
2376biowait(register struct buf * bp)
2377{
2378 int s;
2379
2380 s = splbio();
2381 while ((bp->b_flags & B_DONE) == 0)
2382#if defined(NO_SCHEDULE_MODS)
2383 tsleep(bp, PRIBIO, "biowait", 0);
2384#else
2385 if (bp->b_flags & B_READ)
2386 tsleep(bp, PRIBIO, "biord", 0);
2387 else
2388 tsleep(bp, PRIBIO, "biowr", 0);
2389#endif
2390 splx(s);
2391 if (bp->b_flags & B_EINTR) {
2392 bp->b_flags &= ~B_EINTR;
2393 return (EINTR);
2394 }
2395 if (bp->b_flags & B_ERROR) {
2396 return (bp->b_error ? bp->b_error : EIO);
2397 } else {
2398 return (0);
2399 }
2400}
2401
2402/*
2403 * biodone:
2404 *
2405 * Finish I/O on a buffer, optionally calling a completion function.
2406 * This is usually called from an interrupt so process blocking is
2407 * not allowed.
2408 *
2409 * biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2410 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
2411 * assuming B_INVAL is clear.
2412 *
2413 * For the VMIO case, we set B_CACHE if the op was a read and no
2414 * read error occurred, or if the op was a write. B_CACHE is never
2415 * set if the buffer is invalid or otherwise uncacheable.
2416 *
2417 * biodone does not mess with B_INVAL, allowing the I/O routine or the
2418 * initiator to leave B_INVAL set to brelse the buffer out of existence
2419 * in the biodone routine.
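 *
 * A hedged sketch of the B_CALL completion protocol ( the handler
 * name is hypothetical; the flags and fields are the ones tested
 * below ):
 *
 *	bp->b_flags |= B_CALL;
 *	bp->b_iodone = mydriver_done;
 *	VOP_STRATEGY(vp, bp);
 *	...
 *	( later, from the device interrupt, biodone(bp) clears B_CALL,
 *	  calls mydriver_done(bp), and returns without releasing the
 *	  buffer -- the handler owns that decision )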
2420 */
2421void
2422biodone(register struct buf * bp)
2423{
2424 int s;
2425
2426 s = splbio();
2427
2428 KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy", bp));
2429 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2430
2431 bp->b_flags |= B_DONE;
2432
2433 if (bp->b_flags & B_FREEBUF) {
2434 brelse(bp);
2435 splx(s);
2436 return;
2437 }
2438
2439 if ((bp->b_flags & B_READ) == 0) {
2440 vwakeup(bp);
2441 }
2442
2443 /* call optional completion function if requested */
2444 if (bp->b_flags & B_CALL) {
2445 bp->b_flags &= ~B_CALL;
2446 (*bp->b_iodone) (bp);
2447 splx(s);
2448 return;
2449 }
2450 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2451 (*bioops.io_complete)(bp);
2452
2453 if (bp->b_flags & B_VMIO) {
2454 int i, resid;
2455 vm_ooffset_t foff;
2456 vm_page_t m;
2457 vm_object_t obj;
2458 int iosize;
2459 struct vnode *vp = bp->b_vp;
2460
2461 obj = vp->v_object;
2462
2463#if defined(VFS_BIO_DEBUG)
2464 if (vp->v_usecount == 0) {
2465 panic("biodone: zero vnode ref count");
2466 }
2467
2468 if (vp->v_object == NULL) {
2469 panic("biodone: missing VM object");
2470 }
2471
2472 if ((vp->v_flag & VOBJBUF) == 0) {
2473 panic("biodone: vnode is not setup for merged cache");
2474 }
2475#endif
2476
2477 foff = bp->b_offset;
2478 KASSERT(bp->b_offset != NOOFFSET,
2479 ("biodone: no buffer offset"));
2480
2481#if !defined(MAX_PERF)
2482 if (!obj) {
2483 panic("biodone: no object");
2484 }
2485#endif
2486#if defined(VFS_BIO_DEBUG)
2487 if (obj->paging_in_progress < bp->b_npages) {
2488 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2489 obj->paging_in_progress, bp->b_npages);
2490 }
2491#endif
2492
2493 /*
2494 * Set B_CACHE if the op was a normal read and no error
2495 * occurred. B_CACHE is set for writes in the b*write()
2496 * routines.
2497 */
2498 iosize = bp->b_bcount;
2499 if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2500 bp->b_flags |= B_CACHE;
2501 }
2502
2503 for (i = 0; i < bp->b_npages; i++) {
2504 int bogusflag = 0;
2505 m = bp->b_pages[i];
2506 if (m == bogus_page) {
2507 bogusflag = 1;
2508 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2509 if (!m) {
2510#if defined(VFS_BIO_DEBUG)
2511 printf("biodone: page disappeared\n");
2512#endif
2513 vm_object_pip_subtract(obj, 1);
2514 bp->b_flags &= ~B_CACHE;
2515 continue;
2516 }
2517 bp->b_pages[i] = m;
2518 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2519 }
2520#if defined(VFS_BIO_DEBUG)
2521 if (OFF_TO_IDX(foff) != m->pindex) {
2522 printf(
2523"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2524 (unsigned long)foff, (int) m->pindex);
2525 }
2526#endif
2527 resid = IDX_TO_OFF(m->pindex + 1) - foff;
2528 if (resid > iosize)
2529 resid = iosize;
2530
2531 /*
2532 * In the write case, the valid and clean bits are
2533 * already changed correctly ( see bdwrite() ), so we
2534 * only need to do this here in the read case.
2535 */
2536 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2537 vfs_page_set_valid(bp, foff, i, m);
2538 }
2539 vm_page_flag_clear(m, PG_ZERO);
2540
2541 /*
2542 * When debugging new filesystems or buffer I/O methods, this
2543 * is the most common error that pops up. If you see this, you
2544 * have not set the page busy flag correctly!!!
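 * In this file the busy count is normally raised by
 * vfs_busy_pages() ( via vm_page_io_start() ) before the
 * strategy call, and dropped by the vm_page_io_finish() below.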
2545 */
2546 if (m->busy == 0) {
2547#if !defined(MAX_PERF)
2548 printf("biodone: page busy == 0, "
2549 "pindex: %d, foff: 0x(%x,%x), "
2550 "resid: %d, index: %d\n",
2551 (int) m->pindex, (int)(foff >> 32),
2552 (int) foff & 0xffffffff, resid, i);
2554 if (vp->v_type != VBLK)
2556 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2557 bp->b_vp->v_mount->mnt_stat.f_iosize,
2558 (int) bp->b_lblkno,
2559 bp->b_flags, bp->b_npages);
2560 else
2561 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2562 (int) bp->b_lblkno,
2563 bp->b_flags, bp->b_npages);
2564 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2565 m->valid, m->dirty, m->wire_count);
2566#endif
2567 panic("biodone: page busy == 0\n");
2568 }
2569 vm_page_io_finish(m);
2570 vm_object_pip_subtract(obj, 1);
2571 foff += resid;
2572 iosize -= resid;
2573 }
2574 if (obj)
2575 vm_object_pip_wakeupn(obj, 0);
2576 }
2577 /*
2578 * For asynchronous completions, release the buffer now. The brelse
2579 * will do a wakeup there if necessary - so no need to do a wakeup
2580 * here in the async case. The sync case always needs to do a wakeup.
2581 */
2582
2583 if (bp->b_flags & B_ASYNC) {
2584 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2585 brelse(bp);
2586 else
2587 bqrelse(bp);
2588 } else {
2589 wakeup(bp);
2590 }
2591 splx(s);
2592}
2593
2594/*
2595 * This routine is called in lieu of biodone in the case of
2596 * incomplete I/O. This keeps the busy status for pages
2597 * consistent.
2598 */
2599void
2600vfs_unbusy_pages(struct buf * bp)
2601{
2602 int i;
2603
2604 if (bp->b_flags & B_VMIO) {
2605 struct vnode *vp = bp->b_vp;
2606 vm_object_t obj = vp->v_object;
2607
2608 for (i = 0; i < bp->b_npages; i++) {
2609 vm_page_t m = bp->b_pages[i];
2610
2611 if (m == bogus_page) {
2612 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2613#if !defined(MAX_PERF)
2614 if (!m) {
2615 panic("vfs_unbusy_pages: page missing\n");
2616 }
2617#endif
2618 bp->b_pages[i] = m;
2619 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2620 }
2621 vm_object_pip_subtract(obj, 1);
2622 vm_page_flag_clear(m, PG_ZERO);
2623 vm_page_io_finish(m);
2624 }
2625 vm_object_pip_wakeupn(obj, 0);
2626 }
2627}
2628
2629/*
2630 * vfs_page_set_valid:
2631 *
2632 * Set the valid bits in a page based on the supplied offset. The
2633 * range is restricted to the buffer's size.
2634 *
2635 * This routine is typically called after a read completes.
2636 */
2637static void
2638vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2639{
2640 vm_ooffset_t soff, eoff;
2641
2642 /*
2643 * Start and end offsets in buffer. eoff - soff may not cross a
2644 * page boundary or cross the end of the buffer. The end of the
2645 * buffer, in this case, is our file EOF, not the allocation size
2646 * of the buffer.
2647 */
2648 soff = off;
2649 eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2650 if (eoff > bp->b_offset + bp->b_bcount)
2651 eoff = bp->b_offset + bp->b_bcount;
2652
2653 /*
2654 * Set valid range. This is typically the entire buffer and thus the
2655 * entire page.
2656 */
2657 if (eoff > soff) {
2658 vm_page_set_validclean(
2659 m,
2660 (vm_offset_t) (soff & PAGE_MASK),
2661 (vm_offset_t) (eoff - soff)
2662 );
2663 }
2664}
2665
2666/*
2667 * This routine is called before a device strategy routine.
2668 * It is used to tell the VM system that paging I/O is in
2669 * progress, and treat the pages associated with the buffer
2670 * almost as being PG_BUSY.
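 * ( In a typical write, vfs_busy_pages(bp, 1) runs just before the
 * VOP_STRATEGY() call, and biodone() or vfs_unbusy_pages() undoes
 * the busying once the I/O completes or is aborted. )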
 * Also the object paging_in_progress
2671 * flag is handled to make sure that the object doesn't become
2672 * inconsistent.
2673 *
2674 * Since I/O has not been initiated yet, certain buffer flags
2675 * such as B_ERROR or B_INVAL may be in an inconsistent state
2676 * and should be ignored.
2677 */
2678void
2679vfs_busy_pages(struct buf * bp, int clear_modify)
2680{
2681 int i, bogus;
2682
2683 if (bp->b_flags & B_VMIO) {
2684 struct vnode *vp = bp->b_vp;
2685 vm_object_t obj = vp->v_object;
2686 vm_ooffset_t foff;
2687
2688 foff = bp->b_offset;
2689 KASSERT(bp->b_offset != NOOFFSET,
2690 ("vfs_busy_pages: no buffer offset"));
2691 vfs_setdirty(bp);
2692
2693retry:
2694 for (i = 0; i < bp->b_npages; i++) {
2695 vm_page_t m = bp->b_pages[i];
2696 if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2697 goto retry;
2698 }
2699
2700 bogus = 0;
2701 for (i = 0; i < bp->b_npages; i++) {
2702 vm_page_t m = bp->b_pages[i];
2703
2704 vm_page_flag_clear(m, PG_ZERO);
2705 if ((bp->b_flags & B_CLUSTER) == 0) {
2706 vm_object_pip_add(obj, 1);
2707 vm_page_io_start(m);
2708 }
2709
2710 /*
2711 * When readying a buffer for a read ( i.e.
2712 * clear_modify == 0 ), it is important to do
2713 * bogus_page replacement for valid pages in
2714 * partially instantiated buffers. Partially
2715 * instantiated buffers can, in turn, occur when
2716 * reconstituting a buffer from its VM backing store
2717 * base. We only have to do this if B_CACHE is
2718 * clear ( which causes the I/O to occur in the
2719 * first place ). The replacement prevents the read
2720 * I/O from overwriting potentially dirty VM-backed
2721 * pages. XXX bogus page replacement is, uh, bogus.
2722 * It may not work properly with small-block devices.
2723 * We need to find a better way.
2724 */
2725
2726 vm_page_protect(m, VM_PROT_NONE);
2727 if (clear_modify)
2728 vfs_page_set_valid(bp, foff, i, m);
2729 else if (m->valid == VM_PAGE_BITS_ALL &&
2730 (bp->b_flags & B_CACHE) == 0) {
2731 bp->b_pages[i] = bogus_page;
2732 bogus++;
2733 }
2734 foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2735 }
2736 if (bogus)
2737 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2738 }
2739}
2740
2741/*
2742 * Tell the VM system that the pages associated with this buffer
2743 * are clean. This is used for delayed writes where the data is
2744 * going to go to disk eventually without additional VM intervention.
2745 *
2746 * Note that while we only really need to clean through to b_bcount, we
2747 * just go ahead and clean through to b_bufsize.
2748 */
2749static void
2750vfs_clean_pages(struct buf * bp)
2751{
2752 int i;
2753
2754 if (bp->b_flags & B_VMIO) {
2755 vm_ooffset_t foff;
2756
2757 foff = bp->b_offset;
2758 KASSERT(bp->b_offset != NOOFFSET,
2759 ("vfs_clean_pages: no buffer offset"));
2760 for (i = 0; i < bp->b_npages; i++) {
2761 vm_page_t m = bp->b_pages[i];
2762 vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2763 vm_ooffset_t eoff = noff;
2764
2765 if (eoff > bp->b_offset + bp->b_bufsize)
2766 eoff = bp->b_offset + bp->b_bufsize;
2767 vfs_page_set_valid(bp, foff, i, m);
2768 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2769 foff = noff;
2770 }
2771 }
2772}
2773
2774/*
2775 * vfs_bio_set_validclean:
2776 *
2777 * Set the range within the buffer to valid and clean. The range is
2778 * relative to the beginning of the buffer, b_offset. Note that b_offset
2779 * itself may be offset from the beginning of the first page.
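 *
 * An illustrative call with hypothetical values ( assuming 4K
 * pages ): with bp->b_offset == 0x1200, vfs_bio_set_validclean(bp,
 * 0, 512) computes base = 0x200 and marks bytes 0x200-0x3ff of the
 * buffer's first page valid and clean.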
2780 */
2781
2782void
2783vfs_bio_set_validclean(struct buf *bp, int base, int size)
2784{
2785 if (bp->b_flags & B_VMIO) {
2786 int i;
2787 int n;
2788
2789 /*
2790 * Fixup base to be relative to beginning of first page.
2791 * Set initial n to be the maximum number of bytes in the
2792 * first page that can be validated.
2793 */
2794
2795 base += (bp->b_offset & PAGE_MASK);
2796 n = PAGE_SIZE - (base & PAGE_MASK);
2797
2798 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2799 vm_page_t m = bp->b_pages[i];
2800
2801 if (n > size)
2802 n = size;
2803
2804 vm_page_set_validclean(m, base & PAGE_MASK, n);
2805 base += n;
2806 size -= n;
2807 n = PAGE_SIZE;
2808 }
2809 }
2810}
2811
2812/*
2813 * vfs_bio_clrbuf:
2814 *
2815 * Clear a buffer. This routine essentially fakes an I/O, so we need
2816 * to clear B_ERROR and B_INVAL.
2817 *
2818 * Note that while we only theoretically need to clear through b_bcount,
2819 * we go ahead and clear through b_bufsize.
2820 */
2821
2822void
2823vfs_bio_clrbuf(struct buf *bp) {
2824 int i, mask = 0;
2825 caddr_t sa, ea;
2826 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2827 bp->b_flags &= ~(B_INVAL|B_ERROR);
2828 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2829 (bp->b_offset & PAGE_MASK) == 0) {
2830 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2831 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2832 ((bp->b_pages[0]->valid & mask) != mask)) {
2833 bzero(bp->b_data, bp->b_bufsize);
2834 }
2835 bp->b_pages[0]->valid |= mask;
2836 bp->b_resid = 0;
2837 return;
2838 }
2839 ea = sa = bp->b_data;
2840 for(i=0;i<bp->b_npages;i++,sa=ea) {
2841 int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
2842 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2843 ea = (caddr_t)ulmin((u_long)ea,
2844 (u_long)bp->b_data + bp->b_bufsize);
2845 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2846 if ((bp->b_pages[i]->valid & mask) == mask)
2847 continue;
2848 if ((bp->b_pages[i]->valid & mask) == 0) {
2849 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2850 bzero(sa, ea - sa);
2851 }
2852 } else {
2853 for (; sa < ea; sa += DEV_BSIZE, j++) {
2854 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2855 (bp->b_pages[i]->valid & (1<<j)) == 0)
2856 bzero(sa, DEV_BSIZE);
2857 }
2858 }
2859 bp->b_pages[i]->valid |= mask;
2860 vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2861 }
2862 bp->b_resid = 0;
2863 } else {
2864 clrbuf(bp);
2865 }
2866}
2867
2868/*
2869 * vm_hold_load_pages() and vm_hold_free_pages() get pages into and out
2870 * of a buffer's address space. The pages are anonymous and are
2871 * not associated with a file object.
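 *
 * For example, allocbuf() grows a non-VMIO buffer with:
 *
 *	vm_hold_load_pages(bp,
 *	    (vm_offset_t) bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t) bp->b_data + newbsize);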
2872 */ 2873void 2874vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2875{ 2876 vm_offset_t pg; 2877 vm_page_t p; 2878 int index; 2879 2880 to = round_page(to); 2881 from = round_page(from); 2882 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2883 2884 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2885 2886tryagain: 2887 2888 p = vm_page_alloc(kernel_object, 2889 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 2890 VM_ALLOC_NORMAL); 2891 if (!p) { 2892 vm_pageout_deficit += (to - from) >> PAGE_SHIFT; 2893 VM_WAIT; 2894 goto tryagain; 2895 } 2896 vm_page_wire(p); 2897 p->valid = VM_PAGE_BITS_ALL; 2898 vm_page_flag_clear(p, PG_ZERO); 2899 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 2900 bp->b_pages[index] = p; 2901 vm_page_wakeup(p); 2902 } 2903 bp->b_npages = index; 2904} 2905 2906void 2907vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to) 2908{ 2909 vm_offset_t pg; 2910 vm_page_t p; 2911 int index, newnpages; 2912 2913 from = round_page(from); 2914 to = round_page(to); 2915 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 2916 2917 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 2918 p = bp->b_pages[index]; 2919 if (p && (index < bp->b_npages)) { 2920#if !defined(MAX_PERF) 2921 if (p->busy) { 2922 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n", 2923 bp->b_blkno, bp->b_lblkno); 2924 } 2925#endif 2926 bp->b_pages[index] = NULL; 2927 pmap_kremove(pg); 2928 vm_page_busy(p); 2929 vm_page_unwire(p, 0); 2930 vm_page_free(p); 2931 } 2932 } 2933 bp->b_npages = newnpages; 2934} 2935 2936 2937#include "opt_ddb.h" 2938#ifdef DDB 2939#include <ddb/ddb.h> 2940 2941DB_SHOW_COMMAND(buffer, db_show_buffer) 2942{ 2943 /* get args */ 2944 struct buf *bp = (struct buf *)addr; 2945 2946 if (!have_addr) { 2947 db_printf("usage: show buffer <addr>\n"); 2948 return; 2949 } 2950 2951 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 2952 db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, " 2953 "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, " 2954 "b_blkno = %d, b_pblkno = %d\n", 2955 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 2956 major(bp->b_dev), minor(bp->b_dev), 2957 bp->b_data, bp->b_blkno, bp->b_pblkno); 2958 if (bp->b_npages) { 2959 int i; 2960 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 2961 for (i = 0; i < bp->b_npages; i++) { 2962 vm_page_t m; 2963 m = bp->b_pages[i]; 2964 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 2965 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 2966 if ((i + 1) < bp->b_npages) 2967 db_printf(","); 2968 } 2969 db_printf("\n"); 2970 } 2971} 2972#endif /* DDB */ 2973
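
/*
 * Postscript: a hedged sketch ( not part of the interfaces above ) of
 * the classic bread()-style read path a filesystem builds from these
 * primitives; "lbn" and "bsize" stand in for caller-supplied values:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags |= B_READ;
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(vp, bp);
 *		error = biowait(bp);
 *	}
 */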