vfs_bio.c revision 255986
1/*- 2 * Copyright (c) 2004 Poul-Henning Kamp 3 * Copyright (c) 1994,1997 John S. Dyson 4 * Copyright (c) 2013 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Konstantin Belousov 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32/* 33 * this file contains a new buffer I/O scheme implementing a coherent 34 * VM object and buffer cache scheme. Pains have been taken to make 35 * sure that the performance degradation associated with schemes such 36 * as this is not realized. 37 * 38 * Author: John S. Dyson 39 * Significant help during the development and debugging phases 40 * had been provided by David Greenman, also of the FreeBSD core team. 41 * 42 * see man buf(9) for more info. 43 */ 44 45#include <sys/cdefs.h> 46__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 255986 2013-10-02 06:00:34Z kib $"); 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/bio.h> 51#include <sys/conf.h> 52#include <sys/buf.h> 53#include <sys/devicestat.h> 54#include <sys/eventhandler.h> 55#include <sys/fail.h> 56#include <sys/limits.h> 57#include <sys/lock.h> 58#include <sys/malloc.h> 59#include <sys/mount.h> 60#include <sys/mutex.h> 61#include <sys/kernel.h> 62#include <sys/kthread.h> 63#include <sys/proc.h> 64#include <sys/resourcevar.h> 65#include <sys/rwlock.h> 66#include <sys/sysctl.h> 67#include <sys/vmem.h> 68#include <sys/vmmeter.h> 69#include <sys/vnode.h> 70#include <geom/geom.h> 71#include <vm/vm.h> 72#include <vm/vm_param.h> 73#include <vm/vm_kern.h> 74#include <vm/vm_pageout.h> 75#include <vm/vm_page.h> 76#include <vm/vm_object.h> 77#include <vm/vm_extern.h> 78#include <vm/vm_map.h> 79#include "opt_compat.h" 80#include "opt_directio.h" 81#include "opt_swap.h" 82 83static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer"); 84 85struct bio_ops bioops; /* I/O operation notification */ 86 87struct buf_ops buf_ops_bio = { 88 .bop_name = "buf_ops_bio", 89 .bop_write = bufwrite, 90 .bop_strategy = bufstrategy, 91 .bop_sync = bufsync, 92 .bop_bdflush = bufbdflush, 93}; 94 95/* 96 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has 97 * carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c. 98 */ 99struct buf *buf; /* buffer header pool */ 100caddr_t unmapped_buf; 101 102static struct proc *bufdaemonproc; 103 104static int inmem(struct vnode *vp, daddr_t blkno); 105static void vm_hold_free_pages(struct buf *bp, int newbsize); 106static void vm_hold_load_pages(struct buf *bp, vm_offset_t from, 107 vm_offset_t to); 108static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m); 109static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, 110 vm_page_t m); 111static void vfs_clean_pages_dirty_buf(struct buf *bp); 112static void vfs_setdirty_locked_object(struct buf *bp); 113static void vfs_vmio_release(struct buf *bp); 114static int vfs_bio_clcheck(struct vnode *vp, int size, 115 daddr_t lblkno, daddr_t blkno); 116static int buf_flush(int); 117static int flushbufqueues(int, int); 118static void buf_daemon(void); 119static void bremfreel(struct buf *bp); 120static __inline void bd_wakeup(void); 121#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 122 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 123static int sysctl_bufspace(SYSCTL_HANDLER_ARGS); 124#endif 125 126int vmiodirenable = TRUE; 127SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0, 128 "Use the VM system for directory writes"); 129long runningbufspace; 130SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, 131 "Amount of presently outstanding async buffer io"); 132static long bufspace; 133#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 134 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 135SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD, 136 &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers"); 137#else 138SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0, 139 "Virtual memory used for buffers"); 140#endif 141static long unmapped_bufspace; 142SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD, 143 &unmapped_bufspace, 0, 144 "Amount of unmapped buffers, inclusive in the bufspace"); 145static long maxbufspace; 146SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0, 147 "Maximum allowed value of bufspace (including buf_daemon)"); 148static long bufmallocspace; 149SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0, 150 "Amount of malloced memory for buffers"); 151static long maxbufmallocspace; 152SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0, 153 "Maximum amount of malloced memory for buffers"); 154static long lobufspace; 155SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0, 156 "Minimum amount of buffers we want to have"); 157long hibufspace; 158SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0, 159 "Maximum allowed value of bufspace (excluding buf_daemon)"); 160static int bufreusecnt; 161SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0, 162 "Number of times we have reused a buffer"); 163static int buffreekvacnt; 164SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0, 165 "Number of times we have freed the KVA space from some buffer"); 166static int bufdefragcnt; 167SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0, 168 "Number of times we have had to repeat buffer allocation to defragment"); 169static long lorunningspace; 170SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0, 171 "Minimum preferred space used for in-progress I/O"); 172static long hirunningspace; 173SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0, 174 "Maximum amount of space to use for in-progress I/O"); 175int dirtybufferflushes; 176SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes, 177 0, "Number of bdwrite to bawrite conversions to limit dirty buffers"); 178int bdwriteskip; 179SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip, 180 0, "Number of buffers supplied to bdwrite with snapshot deadlock risk"); 181int altbufferflushes; 182SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes, 183 0, "Number of fsync flushes to limit dirty buffers"); 184static int recursiveflushes; 185SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes, 186 0, "Number of flushes skipped due to being recursive"); 187static int numdirtybuffers; 188SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0, 189 "Number of buffers that are dirty (has unwritten changes) at the moment"); 190static int lodirtybuffers; 191SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0, 192 "How many buffers we want to have free before bufdaemon can sleep"); 193static int hidirtybuffers; 194SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0, 195 "When the number of dirty buffers is considered severe"); 196int dirtybufthresh; 197SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh, 198 0, "Number of bdwrite to bawrite conversions to clear dirty buffers"); 199static int numfreebuffers; 200SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0, 201 "Number of free buffers"); 202static int lofreebuffers; 203SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0, 204 "XXX Unused"); 205static int hifreebuffers; 206SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0, 207 "XXX Complicatedly unused"); 208static int getnewbufcalls; 209SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0, 210 "Number of calls to getnewbuf"); 211static int getnewbufrestarts; 212SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0, 213 "Number of times getnewbuf has had to restart a buffer aquisition"); 214static int mappingrestarts; 215SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0, 216 "Number of times getblk has had to restart a buffer mapping for " 217 "unmapped buffer"); 218static int flushbufqtarget = 100; 219SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0, 220 "Amount of work to do in flushbufqueues when helping bufdaemon"); 221static long notbufdflushes; 222SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, ¬bufdflushes, 0, 223 "Number of dirty buffer flushes done by the bufdaemon helpers"); 224static long barrierwrites; 225SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0, 226 "Number of barrier writes"); 227SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD, 228 &unmapped_buf_allowed, 0, 229 "Permit the use of the unmapped i/o"); 230 231/* 232 * Lock for the non-dirty bufqueues 233 */ 234static struct mtx_padalign bqclean; 235 236/* 237 * Lock for the dirty queue. 238 */ 239static struct mtx_padalign bqdirty; 240 241/* 242 * This lock synchronizes access to bd_request. 243 */ 244static struct mtx_padalign bdlock; 245 246/* 247 * This lock protects the runningbufreq and synchronizes runningbufwakeup and 248 * waitrunningbufspace(). 249 */ 250static struct mtx_padalign rbreqlock; 251 252/* 253 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it. 254 */ 255static struct mtx_padalign nblock; 256 257/* 258 * Lock that protects bdirtywait. 259 */ 260static struct mtx_padalign bdirtylock; 261 262/* 263 * Wakeup point for bufdaemon, as well as indicator of whether it is already 264 * active. Set to 1 when the bufdaemon is already "on" the queue, 0 when it 265 * is idling. 266 */ 267static int bd_request; 268 269/* 270 * Request for the buf daemon to write more buffers than is indicated by 271 * lodirtybuf. This may be necessary to push out excess dependencies or 272 * defragment the address space where a simple count of the number of dirty 273 * buffers is insufficient to characterize the demand for flushing them. 274 */ 275static int bd_speedupreq; 276 277/* 278 * bogus page -- for I/O to/from partially complete buffers 279 * this is a temporary solution to the problem, but it is not 280 * really that bad. it would be better to split the buffer 281 * for input in the case of buffers partially already in memory, 282 * but the code is intricate enough already. 283 */ 284vm_page_t bogus_page; 285 286/* 287 * Synchronization (sleep/wakeup) variable for active buffer space requests. 288 * Set when wait starts, cleared prior to wakeup(). 289 * Used in runningbufwakeup() and waitrunningbufspace(). 290 */ 291static int runningbufreq; 292 293/* 294 * Synchronization (sleep/wakeup) variable for buffer requests. 295 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done 296 * by and/or. 297 * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(), 298 * getnewbuf(), and getblk(). 299 */ 300static int needsbuffer; 301 302/* 303 * Synchronization for bwillwrite() waiters. 304 */ 305static int bdirtywait; 306 307/* 308 * Definitions for the buffer free lists. 309 */ 310#define BUFFER_QUEUES 5 /* number of free buffer queues */ 311 312#define QUEUE_NONE 0 /* on no queue */ 313#define QUEUE_CLEAN 1 /* non-B_DELWRI buffers */ 314#define QUEUE_DIRTY 2 /* B_DELWRI buffers */ 315#define QUEUE_EMPTYKVA 3 /* empty buffer headers w/KVA assignment */ 316#define QUEUE_EMPTY 4 /* empty buffer headers */ 317#define QUEUE_SENTINEL 1024 /* not an queue index, but mark for sentinel */ 318 319/* Queues for free buffers with various properties */ 320static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } }; 321#ifdef INVARIANTS 322static int bq_len[BUFFER_QUEUES]; 323#endif 324 325/* 326 * Single global constant for BUF_WMESG, to avoid getting multiple references. 327 * buf_wmesg is referred from macros. 328 */ 329const char *buf_wmesg = BUF_WMESG; 330 331#define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 332#define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */ 333#define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 334 335#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 336 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 337static int 338sysctl_bufspace(SYSCTL_HANDLER_ARGS) 339{ 340 long lvalue; 341 int ivalue; 342 343 if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long)) 344 return (sysctl_handle_long(oidp, arg1, arg2, req)); 345 lvalue = *(long *)arg1; 346 if (lvalue > INT_MAX) 347 /* On overflow, still write out a long to trigger ENOMEM. */ 348 return (sysctl_handle_long(oidp, &lvalue, 0, req)); 349 ivalue = lvalue; 350 return (sysctl_handle_int(oidp, &ivalue, 0, req)); 351} 352#endif 353 354#ifdef DIRECTIO 355extern void ffs_rawread_setup(void); 356#endif /* DIRECTIO */ 357 358/* 359 * bqlock: 360 * 361 * Return the appropriate queue lock based on the index. 362 */ 363static inline struct mtx * 364bqlock(int qindex) 365{ 366 367 if (qindex == QUEUE_DIRTY) 368 return (struct mtx *)(&bqdirty); 369 return (struct mtx *)(&bqclean); 370} 371 372/* 373 * bdirtywakeup: 374 * 375 * Wakeup any bwillwrite() waiters. 376 */ 377static void 378bdirtywakeup(void) 379{ 380 mtx_lock(&bdirtylock); 381 if (bdirtywait) { 382 bdirtywait = 0; 383 wakeup(&bdirtywait); 384 } 385 mtx_unlock(&bdirtylock); 386} 387 388/* 389 * bdirtysub: 390 * 391 * Decrement the numdirtybuffers count by one and wakeup any 392 * threads blocked in bwillwrite(). 393 */ 394static void 395bdirtysub(void) 396{ 397 398 if (atomic_fetchadd_int(&numdirtybuffers, -1) == 399 (lodirtybuffers + hidirtybuffers) / 2) 400 bdirtywakeup(); 401} 402 403/* 404 * bdirtyadd: 405 * 406 * Increment the numdirtybuffers count by one and wakeup the buf 407 * daemon if needed. 408 */ 409static void 410bdirtyadd(void) 411{ 412 413 /* 414 * Only do the wakeup once as we cross the boundary. The 415 * buf daemon will keep running until the condition clears. 416 */ 417 if (atomic_fetchadd_int(&numdirtybuffers, 1) == 418 (lodirtybuffers + hidirtybuffers) / 2) 419 bd_wakeup(); 420} 421 422/* 423 * bufspacewakeup: 424 * 425 * Called when buffer space is potentially available for recovery. 426 * getnewbuf() will block on this flag when it is unable to free 427 * sufficient buffer space. Buffer space becomes recoverable when 428 * bp's get placed back in the queues. 429 */ 430 431static __inline void 432bufspacewakeup(void) 433{ 434 435 /* 436 * If someone is waiting for BUF space, wake them up. Even 437 * though we haven't freed the kva space yet, the waiting 438 * process will be able to now. 439 */ 440 mtx_lock(&nblock); 441 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 442 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 443 wakeup(&needsbuffer); 444 } 445 mtx_unlock(&nblock); 446} 447 448/* 449 * runningwakeup: 450 * 451 * Wake up processes that are waiting on asynchronous writes to fall 452 * below lorunningspace. 453 */ 454static void 455runningwakeup(void) 456{ 457 458 mtx_lock(&rbreqlock); 459 if (runningbufreq) { 460 runningbufreq = 0; 461 wakeup(&runningbufreq); 462 } 463 mtx_unlock(&rbreqlock); 464} 465 466/* 467 * runningbufwakeup: 468 * 469 * Decrement the outstanding write count according. 470 */ 471void 472runningbufwakeup(struct buf *bp) 473{ 474 long space, bspace; 475 476 bspace = bp->b_runningbufspace; 477 if (bspace == 0) 478 return; 479 space = atomic_fetchadd_long(&runningbufspace, -bspace); 480 KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld", 481 space, bspace)); 482 bp->b_runningbufspace = 0; 483 /* 484 * Only acquire the lock and wakeup on the transition from exceeding 485 * the threshold to falling below it. 486 */ 487 if (space < lorunningspace) 488 return; 489 if (space - bspace > lorunningspace) 490 return; 491 runningwakeup(); 492} 493 494/* 495 * bufcountadd: 496 * 497 * Called when a buffer has been added to one of the free queues to 498 * account for the buffer and to wakeup anyone waiting for free buffers. 499 * This typically occurs when large amounts of metadata are being handled 500 * by the buffer cache ( else buffer space runs out first, usually ). 501 */ 502static __inline void 503bufcountadd(struct buf *bp) 504{ 505 int old; 506 507 KASSERT((bp->b_flags & B_INFREECNT) == 0, 508 ("buf %p already counted as free", bp)); 509 bp->b_flags |= B_INFREECNT; 510 old = atomic_fetchadd_int(&numfreebuffers, 1); 511 KASSERT(old >= 0 && old < nbuf, 512 ("numfreebuffers climbed to %d", old + 1)); 513 mtx_lock(&nblock); 514 if (needsbuffer) { 515 needsbuffer &= ~VFS_BIO_NEED_ANY; 516 if (numfreebuffers >= hifreebuffers) 517 needsbuffer &= ~VFS_BIO_NEED_FREE; 518 wakeup(&needsbuffer); 519 } 520 mtx_unlock(&nblock); 521} 522 523/* 524 * bufcountsub: 525 * 526 * Decrement the numfreebuffers count as needed. 527 */ 528static void 529bufcountsub(struct buf *bp) 530{ 531 int old; 532 533 /* 534 * Fixup numfreebuffers count. If the buffer is invalid or not 535 * delayed-write, the buffer was free and we must decrement 536 * numfreebuffers. 537 */ 538 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) { 539 KASSERT((bp->b_flags & B_INFREECNT) != 0, 540 ("buf %p not counted in numfreebuffers", bp)); 541 bp->b_flags &= ~B_INFREECNT; 542 old = atomic_fetchadd_int(&numfreebuffers, -1); 543 KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1)); 544 } 545} 546 547/* 548 * waitrunningbufspace() 549 * 550 * runningbufspace is a measure of the amount of I/O currently 551 * running. This routine is used in async-write situations to 552 * prevent creating huge backups of pending writes to a device. 553 * Only asynchronous writes are governed by this function. 554 * 555 * This does NOT turn an async write into a sync write. It waits 556 * for earlier writes to complete and generally returns before the 557 * caller's write has reached the device. 558 */ 559void 560waitrunningbufspace(void) 561{ 562 563 mtx_lock(&rbreqlock); 564 while (runningbufspace > hirunningspace) { 565 runningbufreq = 1; 566 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0); 567 } 568 mtx_unlock(&rbreqlock); 569} 570 571 572/* 573 * vfs_buf_test_cache: 574 * 575 * Called when a buffer is extended. This function clears the B_CACHE 576 * bit if the newly extended portion of the buffer does not contain 577 * valid data. 578 */ 579static __inline 580void 581vfs_buf_test_cache(struct buf *bp, 582 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 583 vm_page_t m) 584{ 585 586 VM_OBJECT_ASSERT_LOCKED(m->object); 587 if (bp->b_flags & B_CACHE) { 588 int base = (foff + off) & PAGE_MASK; 589 if (vm_page_is_valid(m, base, size) == 0) 590 bp->b_flags &= ~B_CACHE; 591 } 592} 593 594/* Wake up the buffer daemon if necessary */ 595static __inline void 596bd_wakeup(void) 597{ 598 599 mtx_lock(&bdlock); 600 if (bd_request == 0) { 601 bd_request = 1; 602 wakeup(&bd_request); 603 } 604 mtx_unlock(&bdlock); 605} 606 607/* 608 * bd_speedup - speedup the buffer cache flushing code 609 */ 610void 611bd_speedup(void) 612{ 613 int needwake; 614 615 mtx_lock(&bdlock); 616 needwake = 0; 617 if (bd_speedupreq == 0 || bd_request == 0) 618 needwake = 1; 619 bd_speedupreq = 1; 620 bd_request = 1; 621 if (needwake) 622 wakeup(&bd_request); 623 mtx_unlock(&bdlock); 624} 625 626#ifdef __i386__ 627#define TRANSIENT_DENOM 5 628#else 629#define TRANSIENT_DENOM 10 630#endif 631 632/* 633 * Calculating buffer cache scaling values and reserve space for buffer 634 * headers. This is called during low level kernel initialization and 635 * may be called more then once. We CANNOT write to the memory area 636 * being reserved at this time. 637 */ 638caddr_t 639kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est) 640{ 641 int tuned_nbuf; 642 long maxbuf, maxbuf_sz, buf_sz, biotmap_sz; 643 644 /* 645 * physmem_est is in pages. Convert it to kilobytes (assumes 646 * PAGE_SIZE is >= 1K) 647 */ 648 physmem_est = physmem_est * (PAGE_SIZE / 1024); 649 650 /* 651 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 652 * For the first 64MB of ram nominally allocate sufficient buffers to 653 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 654 * buffers to cover 1/10 of our ram over 64MB. When auto-sizing 655 * the buffer cache we limit the eventual kva reservation to 656 * maxbcache bytes. 657 * 658 * factor represents the 1/4 x ram conversion. 659 */ 660 if (nbuf == 0) { 661 int factor = 4 * BKVASIZE / 1024; 662 663 nbuf = 50; 664 if (physmem_est > 4096) 665 nbuf += min((physmem_est - 4096) / factor, 666 65536 / factor); 667 if (physmem_est > 65536) 668 nbuf += min((physmem_est - 65536) * 2 / (factor * 5), 669 32 * 1024 * 1024 / (factor * 5)); 670 671 if (maxbcache && nbuf > maxbcache / BKVASIZE) 672 nbuf = maxbcache / BKVASIZE; 673 tuned_nbuf = 1; 674 } else 675 tuned_nbuf = 0; 676 677 /* XXX Avoid unsigned long overflows later on with maxbufspace. */ 678 maxbuf = (LONG_MAX / 3) / BKVASIZE; 679 if (nbuf > maxbuf) { 680 if (!tuned_nbuf) 681 printf("Warning: nbufs lowered from %d to %ld\n", nbuf, 682 maxbuf); 683 nbuf = maxbuf; 684 } 685 686 /* 687 * Ideal allocation size for the transient bio submap if 10% 688 * of the maximal space buffer map. This roughly corresponds 689 * to the amount of the buffer mapped for typical UFS load. 690 * 691 * Clip the buffer map to reserve space for the transient 692 * BIOs, if its extent is bigger than 90% (80% on i386) of the 693 * maximum buffer map extent on the platform. 694 * 695 * The fall-back to the maxbuf in case of maxbcache unset, 696 * allows to not trim the buffer KVA for the architectures 697 * with ample KVA space. 698 */ 699 if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) { 700 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE; 701 buf_sz = (long)nbuf * BKVASIZE; 702 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM * 703 (TRANSIENT_DENOM - 1)) { 704 /* 705 * There is more KVA than memory. Do not 706 * adjust buffer map size, and assign the rest 707 * of maxbuf to transient map. 708 */ 709 biotmap_sz = maxbuf_sz - buf_sz; 710 } else { 711 /* 712 * Buffer map spans all KVA we could afford on 713 * this platform. Give 10% (20% on i386) of 714 * the buffer map to the transient bio map. 715 */ 716 biotmap_sz = buf_sz / TRANSIENT_DENOM; 717 buf_sz -= biotmap_sz; 718 } 719 if (biotmap_sz / INT_MAX > MAXPHYS) 720 bio_transient_maxcnt = INT_MAX; 721 else 722 bio_transient_maxcnt = biotmap_sz / MAXPHYS; 723 /* 724 * Artifically limit to 1024 simultaneous in-flight I/Os 725 * using the transient mapping. 726 */ 727 if (bio_transient_maxcnt > 1024) 728 bio_transient_maxcnt = 1024; 729 if (tuned_nbuf) 730 nbuf = buf_sz / BKVASIZE; 731 } 732 733 /* 734 * swbufs are used as temporary holders for I/O, such as paging I/O. 735 * We have no less then 16 and no more then 256. 736 */ 737 nswbuf = max(min(nbuf/4, 256), 16); 738#ifdef NSWBUF_MIN 739 if (nswbuf < NSWBUF_MIN) 740 nswbuf = NSWBUF_MIN; 741#endif 742#ifdef DIRECTIO 743 ffs_rawread_setup(); 744#endif 745 746 /* 747 * Reserve space for the buffer cache buffers 748 */ 749 swbuf = (void *)v; 750 v = (caddr_t)(swbuf + nswbuf); 751 buf = (void *)v; 752 v = (caddr_t)(buf + nbuf); 753 754 return(v); 755} 756 757/* Initialize the buffer subsystem. Called before use of any buffers. */ 758void 759bufinit(void) 760{ 761 struct buf *bp; 762 int i; 763 764 mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF); 765 mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF); 766 mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF); 767 mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF); 768 mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF); 769 mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF); 770 771 /* next, make a null set of free lists */ 772 for (i = 0; i < BUFFER_QUEUES; i++) 773 TAILQ_INIT(&bufqueues[i]); 774 775 /* finally, initialize each buffer header and stick on empty q */ 776 for (i = 0; i < nbuf; i++) { 777 bp = &buf[i]; 778 bzero(bp, sizeof *bp); 779 bp->b_flags = B_INVAL | B_INFREECNT; 780 bp->b_rcred = NOCRED; 781 bp->b_wcred = NOCRED; 782 bp->b_qindex = QUEUE_EMPTY; 783 bp->b_xflags = 0; 784 LIST_INIT(&bp->b_dep); 785 BUF_LOCKINIT(bp); 786 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 787#ifdef INVARIANTS 788 bq_len[QUEUE_EMPTY]++; 789#endif 790 } 791 792 /* 793 * maxbufspace is the absolute maximum amount of buffer space we are 794 * allowed to reserve in KVM and in real terms. The absolute maximum 795 * is nominally used by buf_daemon. hibufspace is the nominal maximum 796 * used by most other processes. The differential is required to 797 * ensure that buf_daemon is able to run when other processes might 798 * be blocked waiting for buffer space. 799 * 800 * maxbufspace is based on BKVASIZE. Allocating buffers larger then 801 * this may result in KVM fragmentation which is not handled optimally 802 * by the system. 803 */ 804 maxbufspace = (long)nbuf * BKVASIZE; 805 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 806 lobufspace = hibufspace - MAXBSIZE; 807 808 /* 809 * Note: The 16 MiB upper limit for hirunningspace was chosen 810 * arbitrarily and may need further tuning. It corresponds to 811 * 128 outstanding write IO requests (if IO size is 128 KiB), 812 * which fits with many RAID controllers' tagged queuing limits. 813 * The lower 1 MiB limit is the historical upper limit for 814 * hirunningspace. 815 */ 816 hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE), 817 16 * 1024 * 1024), 1024 * 1024); 818 lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE); 819 820/* 821 * Limit the amount of malloc memory since it is wired permanently into 822 * the kernel space. Even though this is accounted for in the buffer 823 * allocation, we don't want the malloced region to grow uncontrolled. 824 * The malloc scheme improves memory utilization significantly on average 825 * (small) directories. 826 */ 827 maxbufmallocspace = hibufspace / 20; 828 829/* 830 * Reduce the chance of a deadlock occuring by limiting the number 831 * of delayed-write dirty buffers we allow to stack up. 832 */ 833 hidirtybuffers = nbuf / 4 + 20; 834 dirtybufthresh = hidirtybuffers * 9 / 10; 835 numdirtybuffers = 0; 836/* 837 * To support extreme low-memory systems, make sure hidirtybuffers cannot 838 * eat up all available buffer space. This occurs when our minimum cannot 839 * be met. We try to size hidirtybuffers to 3/4 our buffer space assuming 840 * BKVASIZE'd buffers. 841 */ 842 while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) { 843 hidirtybuffers >>= 1; 844 } 845 lodirtybuffers = hidirtybuffers / 2; 846 847/* 848 * Try to keep the number of free buffers in the specified range, 849 * and give special processes (e.g. like buf_daemon) access to an 850 * emergency reserve. 851 */ 852 lofreebuffers = nbuf / 18 + 5; 853 hifreebuffers = 2 * lofreebuffers; 854 numfreebuffers = nbuf; 855 856 bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | 857 VM_ALLOC_NORMAL | VM_ALLOC_WIRED); 858 unmapped_buf = (caddr_t)kva_alloc(MAXPHYS); 859} 860 861#ifdef INVARIANTS 862static inline void 863vfs_buf_check_mapped(struct buf *bp) 864{ 865 866 KASSERT((bp->b_flags & B_UNMAPPED) == 0, 867 ("mapped buf %p %x", bp, bp->b_flags)); 868 KASSERT(bp->b_kvabase != unmapped_buf, 869 ("mapped buf: b_kvabase was not updated %p", bp)); 870 KASSERT(bp->b_data != unmapped_buf, 871 ("mapped buf: b_data was not updated %p", bp)); 872} 873 874static inline void 875vfs_buf_check_unmapped(struct buf *bp) 876{ 877 878 KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED, 879 ("unmapped buf %p %x", bp, bp->b_flags)); 880 KASSERT(bp->b_kvabase == unmapped_buf, 881 ("unmapped buf: corrupted b_kvabase %p", bp)); 882 KASSERT(bp->b_data == unmapped_buf, 883 ("unmapped buf: corrupted b_data %p", bp)); 884} 885 886#define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp) 887#define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp) 888#else 889#define BUF_CHECK_MAPPED(bp) do {} while (0) 890#define BUF_CHECK_UNMAPPED(bp) do {} while (0) 891#endif 892 893static void 894bpmap_qenter(struct buf *bp) 895{ 896 897 BUF_CHECK_MAPPED(bp); 898 899 /* 900 * bp->b_data is relative to bp->b_offset, but 901 * bp->b_offset may be offset into the first page. 902 */ 903 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data); 904 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); 905 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 906 (vm_offset_t)(bp->b_offset & PAGE_MASK)); 907} 908 909/* 910 * bfreekva() - free the kva allocation for a buffer. 911 * 912 * Since this call frees up buffer space, we call bufspacewakeup(). 913 */ 914static void 915bfreekva(struct buf *bp) 916{ 917 918 if (bp->b_kvasize == 0) 919 return; 920 921 atomic_add_int(&buffreekvacnt, 1); 922 atomic_subtract_long(&bufspace, bp->b_kvasize); 923 if ((bp->b_flags & B_UNMAPPED) == 0) { 924 BUF_CHECK_MAPPED(bp); 925 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, 926 bp->b_kvasize); 927 } else { 928 BUF_CHECK_UNMAPPED(bp); 929 if ((bp->b_flags & B_KVAALLOC) != 0) { 930 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvaalloc, 931 bp->b_kvasize); 932 } 933 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize); 934 bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC); 935 } 936 bp->b_kvasize = 0; 937 bufspacewakeup(); 938} 939 940/* 941 * binsfree: 942 * 943 * Insert the buffer into the appropriate free list. 944 */ 945static void 946binsfree(struct buf *bp, int qindex) 947{ 948 struct mtx *olock, *nlock; 949 950 BUF_ASSERT_XLOCKED(bp); 951 952 olock = bqlock(bp->b_qindex); 953 nlock = bqlock(qindex); 954 mtx_lock(olock); 955 /* Handle delayed bremfree() processing. */ 956 if (bp->b_flags & B_REMFREE) 957 bremfreel(bp); 958 959 if (bp->b_qindex != QUEUE_NONE) 960 panic("binsfree: free buffer onto another queue???"); 961 962 bp->b_qindex = qindex; 963 if (olock != nlock) { 964 mtx_unlock(olock); 965 mtx_lock(nlock); 966 } 967 if (bp->b_flags & B_AGE) 968 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 969 else 970 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); 971#ifdef INVARIANTS 972 bq_len[bp->b_qindex]++; 973#endif 974 mtx_unlock(nlock); 975 976 /* 977 * Something we can maybe free or reuse. 978 */ 979 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) 980 bufspacewakeup(); 981 982 if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) 983 bufcountadd(bp); 984} 985 986/* 987 * bremfree: 988 * 989 * Mark the buffer for removal from the appropriate free list. 990 * 991 */ 992void 993bremfree(struct buf *bp) 994{ 995 996 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 997 KASSERT((bp->b_flags & B_REMFREE) == 0, 998 ("bremfree: buffer %p already marked for delayed removal.", bp)); 999 KASSERT(bp->b_qindex != QUEUE_NONE, 1000 ("bremfree: buffer %p not on a queue.", bp)); 1001 BUF_ASSERT_XLOCKED(bp); 1002 1003 bp->b_flags |= B_REMFREE; 1004 bufcountsub(bp); 1005} 1006 1007/* 1008 * bremfreef: 1009 * 1010 * Force an immediate removal from a free list. Used only in nfs when 1011 * it abuses the b_freelist pointer. 1012 */ 1013void 1014bremfreef(struct buf *bp) 1015{ 1016 struct mtx *qlock; 1017 1018 qlock = bqlock(bp->b_qindex); 1019 mtx_lock(qlock); 1020 bremfreel(bp); 1021 mtx_unlock(qlock); 1022} 1023 1024/* 1025 * bremfreel: 1026 * 1027 * Removes a buffer from the free list, must be called with the 1028 * correct qlock held. 1029 */ 1030static void 1031bremfreel(struct buf *bp) 1032{ 1033 1034 CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X", 1035 bp, bp->b_vp, bp->b_flags); 1036 KASSERT(bp->b_qindex != QUEUE_NONE, 1037 ("bremfreel: buffer %p not on a queue.", bp)); 1038 BUF_ASSERT_XLOCKED(bp); 1039 mtx_assert(bqlock(bp->b_qindex), MA_OWNED); 1040 1041 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 1042#ifdef INVARIANTS 1043 KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow", 1044 bp->b_qindex)); 1045 bq_len[bp->b_qindex]--; 1046#endif 1047 bp->b_qindex = QUEUE_NONE; 1048 /* 1049 * If this was a delayed bremfree() we only need to remove the buffer 1050 * from the queue and return the stats are already done. 1051 */ 1052 if (bp->b_flags & B_REMFREE) { 1053 bp->b_flags &= ~B_REMFREE; 1054 return; 1055 } 1056 bufcountsub(bp); 1057} 1058 1059/* 1060 * Attempt to initiate asynchronous I/O on read-ahead blocks. We must 1061 * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set, 1062 * the buffer is valid and we do not have to do anything. 1063 */ 1064void 1065breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, 1066 int cnt, struct ucred * cred) 1067{ 1068 struct buf *rabp; 1069 int i; 1070 1071 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 1072 if (inmem(vp, *rablkno)) 1073 continue; 1074 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0); 1075 1076 if ((rabp->b_flags & B_CACHE) == 0) { 1077 if (!TD_IS_IDLETHREAD(curthread)) 1078 curthread->td_ru.ru_inblock++; 1079 rabp->b_flags |= B_ASYNC; 1080 rabp->b_flags &= ~B_INVAL; 1081 rabp->b_ioflags &= ~BIO_ERROR; 1082 rabp->b_iocmd = BIO_READ; 1083 if (rabp->b_rcred == NOCRED && cred != NOCRED) 1084 rabp->b_rcred = crhold(cred); 1085 vfs_busy_pages(rabp, 0); 1086 BUF_KERNPROC(rabp); 1087 rabp->b_iooffset = dbtob(rabp->b_blkno); 1088 bstrategy(rabp); 1089 } else { 1090 brelse(rabp); 1091 } 1092 } 1093} 1094 1095/* 1096 * Entry point for bread() and breadn() via #defines in sys/buf.h. 1097 * 1098 * Get a buffer with the specified data. Look in the cache first. We 1099 * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE 1100 * is set, the buffer is valid and we do not have to do anything, see 1101 * getblk(). Also starts asynchronous I/O on read-ahead blocks. 1102 */ 1103int 1104breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno, 1105 int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp) 1106{ 1107 struct buf *bp; 1108 int rv = 0, readwait = 0; 1109 1110 CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size); 1111 /* 1112 * Can only return NULL if GB_LOCK_NOWAIT flag is specified. 1113 */ 1114 *bpp = bp = getblk(vp, blkno, size, 0, 0, flags); 1115 if (bp == NULL) 1116 return (EBUSY); 1117 1118 /* if not found in cache, do some I/O */ 1119 if ((bp->b_flags & B_CACHE) == 0) { 1120 if (!TD_IS_IDLETHREAD(curthread)) 1121 curthread->td_ru.ru_inblock++; 1122 bp->b_iocmd = BIO_READ; 1123 bp->b_flags &= ~B_INVAL; 1124 bp->b_ioflags &= ~BIO_ERROR; 1125 if (bp->b_rcred == NOCRED && cred != NOCRED) 1126 bp->b_rcred = crhold(cred); 1127 vfs_busy_pages(bp, 0); 1128 bp->b_iooffset = dbtob(bp->b_blkno); 1129 bstrategy(bp); 1130 ++readwait; 1131 } 1132 1133 breada(vp, rablkno, rabsize, cnt, cred); 1134 1135 if (readwait) { 1136 rv = bufwait(bp); 1137 } 1138 return (rv); 1139} 1140 1141/* 1142 * Write, release buffer on completion. (Done by iodone 1143 * if async). Do not bother writing anything if the buffer 1144 * is invalid. 1145 * 1146 * Note that we set B_CACHE here, indicating that buffer is 1147 * fully valid and thus cacheable. This is true even of NFS 1148 * now so we set it generally. This could be set either here 1149 * or in biodone() since the I/O is synchronous. We put it 1150 * here. 1151 */ 1152int 1153bufwrite(struct buf *bp) 1154{ 1155 int oldflags; 1156 struct vnode *vp; 1157 long space; 1158 int vp_md; 1159 1160 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1161 if (bp->b_flags & B_INVAL) { 1162 brelse(bp); 1163 return (0); 1164 } 1165 1166 if (bp->b_flags & B_BARRIER) 1167 barrierwrites++; 1168 1169 oldflags = bp->b_flags; 1170 1171 BUF_ASSERT_HELD(bp); 1172 1173 if (bp->b_pin_count > 0) 1174 bunpin_wait(bp); 1175 1176 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG), 1177 ("FFS background buffer should not get here %p", bp)); 1178 1179 vp = bp->b_vp; 1180 if (vp) 1181 vp_md = vp->v_vflag & VV_MD; 1182 else 1183 vp_md = 0; 1184 1185 /* 1186 * Mark the buffer clean. Increment the bufobj write count 1187 * before bundirty() call, to prevent other thread from seeing 1188 * empty dirty list and zero counter for writes in progress, 1189 * falsely indicating that the bufobj is clean. 1190 */ 1191 bufobj_wref(bp->b_bufobj); 1192 bundirty(bp); 1193 1194 bp->b_flags &= ~B_DONE; 1195 bp->b_ioflags &= ~BIO_ERROR; 1196 bp->b_flags |= B_CACHE; 1197 bp->b_iocmd = BIO_WRITE; 1198 1199 vfs_busy_pages(bp, 1); 1200 1201 /* 1202 * Normal bwrites pipeline writes 1203 */ 1204 bp->b_runningbufspace = bp->b_bufsize; 1205 space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace); 1206 1207 if (!TD_IS_IDLETHREAD(curthread)) 1208 curthread->td_ru.ru_oublock++; 1209 if (oldflags & B_ASYNC) 1210 BUF_KERNPROC(bp); 1211 bp->b_iooffset = dbtob(bp->b_blkno); 1212 bstrategy(bp); 1213 1214 if ((oldflags & B_ASYNC) == 0) { 1215 int rtval = bufwait(bp); 1216 brelse(bp); 1217 return (rtval); 1218 } else if (space > hirunningspace) { 1219 /* 1220 * don't allow the async write to saturate the I/O 1221 * system. We will not deadlock here because 1222 * we are blocking waiting for I/O that is already in-progress 1223 * to complete. We do not block here if it is the update 1224 * or syncer daemon trying to clean up as that can lead 1225 * to deadlock. 1226 */ 1227 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md) 1228 waitrunningbufspace(); 1229 } 1230 1231 return (0); 1232} 1233 1234void 1235bufbdflush(struct bufobj *bo, struct buf *bp) 1236{ 1237 struct buf *nbp; 1238 1239 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) { 1240 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread); 1241 altbufferflushes++; 1242 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) { 1243 BO_LOCK(bo); 1244 /* 1245 * Try to find a buffer to flush. 1246 */ 1247 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) { 1248 if ((nbp->b_vflags & BV_BKGRDINPROG) || 1249 BUF_LOCK(nbp, 1250 LK_EXCLUSIVE | LK_NOWAIT, NULL)) 1251 continue; 1252 if (bp == nbp) 1253 panic("bdwrite: found ourselves"); 1254 BO_UNLOCK(bo); 1255 /* Don't countdeps with the bo lock held. */ 1256 if (buf_countdeps(nbp, 0)) { 1257 BO_LOCK(bo); 1258 BUF_UNLOCK(nbp); 1259 continue; 1260 } 1261 if (nbp->b_flags & B_CLUSTEROK) { 1262 vfs_bio_awrite(nbp); 1263 } else { 1264 bremfree(nbp); 1265 bawrite(nbp); 1266 } 1267 dirtybufferflushes++; 1268 break; 1269 } 1270 if (nbp == NULL) 1271 BO_UNLOCK(bo); 1272 } 1273} 1274 1275/* 1276 * Delayed write. (Buffer is marked dirty). Do not bother writing 1277 * anything if the buffer is marked invalid. 1278 * 1279 * Note that since the buffer must be completely valid, we can safely 1280 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 1281 * biodone() in order to prevent getblk from writing the buffer 1282 * out synchronously. 1283 */ 1284void 1285bdwrite(struct buf *bp) 1286{ 1287 struct thread *td = curthread; 1288 struct vnode *vp; 1289 struct bufobj *bo; 1290 1291 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1292 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1293 KASSERT((bp->b_flags & B_BARRIER) == 0, 1294 ("Barrier request in delayed write %p", bp)); 1295 BUF_ASSERT_HELD(bp); 1296 1297 if (bp->b_flags & B_INVAL) { 1298 brelse(bp); 1299 return; 1300 } 1301 1302 /* 1303 * If we have too many dirty buffers, don't create any more. 1304 * If we are wildly over our limit, then force a complete 1305 * cleanup. Otherwise, just keep the situation from getting 1306 * out of control. Note that we have to avoid a recursive 1307 * disaster and not try to clean up after our own cleanup! 1308 */ 1309 vp = bp->b_vp; 1310 bo = bp->b_bufobj; 1311 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) { 1312 td->td_pflags |= TDP_INBDFLUSH; 1313 BO_BDFLUSH(bo, bp); 1314 td->td_pflags &= ~TDP_INBDFLUSH; 1315 } else 1316 recursiveflushes++; 1317 1318 bdirty(bp); 1319 /* 1320 * Set B_CACHE, indicating that the buffer is fully valid. This is 1321 * true even of NFS now. 1322 */ 1323 bp->b_flags |= B_CACHE; 1324 1325 /* 1326 * This bmap keeps the system from needing to do the bmap later, 1327 * perhaps when the system is attempting to do a sync. Since it 1328 * is likely that the indirect block -- or whatever other datastructure 1329 * that the filesystem needs is still in memory now, it is a good 1330 * thing to do this. Note also, that if the pageout daemon is 1331 * requesting a sync -- there might not be enough memory to do 1332 * the bmap then... So, this is important to do. 1333 */ 1334 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) { 1335 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 1336 } 1337 1338 /* 1339 * Set the *dirty* buffer range based upon the VM system dirty 1340 * pages. 1341 * 1342 * Mark the buffer pages as clean. We need to do this here to 1343 * satisfy the vnode_pager and the pageout daemon, so that it 1344 * thinks that the pages have been "cleaned". Note that since 1345 * the pages are in a delayed write buffer -- the VFS layer 1346 * "will" see that the pages get written out on the next sync, 1347 * or perhaps the cluster will be completed. 1348 */ 1349 vfs_clean_pages_dirty_buf(bp); 1350 bqrelse(bp); 1351 1352 /* 1353 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 1354 * due to the softdep code. 1355 */ 1356} 1357 1358/* 1359 * bdirty: 1360 * 1361 * Turn buffer into delayed write request. We must clear BIO_READ and 1362 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to 1363 * itself to properly update it in the dirty/clean lists. We mark it 1364 * B_DONE to ensure that any asynchronization of the buffer properly 1365 * clears B_DONE ( else a panic will occur later ). 1366 * 1367 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which 1368 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty() 1369 * should only be called if the buffer is known-good. 1370 * 1371 * Since the buffer is not on a queue, we do not update the numfreebuffers 1372 * count. 1373 * 1374 * The buffer must be on QUEUE_NONE. 1375 */ 1376void 1377bdirty(struct buf *bp) 1378{ 1379 1380 CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X", 1381 bp, bp->b_vp, bp->b_flags); 1382 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1383 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1384 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1385 BUF_ASSERT_HELD(bp); 1386 bp->b_flags &= ~(B_RELBUF); 1387 bp->b_iocmd = BIO_WRITE; 1388 1389 if ((bp->b_flags & B_DELWRI) == 0) { 1390 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI; 1391 reassignbuf(bp); 1392 bdirtyadd(); 1393 } 1394} 1395 1396/* 1397 * bundirty: 1398 * 1399 * Clear B_DELWRI for buffer. 1400 * 1401 * Since the buffer is not on a queue, we do not update the numfreebuffers 1402 * count. 1403 * 1404 * The buffer must be on QUEUE_NONE. 1405 */ 1406 1407void 1408bundirty(struct buf *bp) 1409{ 1410 1411 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1412 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1413 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1414 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1415 BUF_ASSERT_HELD(bp); 1416 1417 if (bp->b_flags & B_DELWRI) { 1418 bp->b_flags &= ~B_DELWRI; 1419 reassignbuf(bp); 1420 bdirtysub(); 1421 } 1422 /* 1423 * Since it is now being written, we can clear its deferred write flag. 1424 */ 1425 bp->b_flags &= ~B_DEFERRED; 1426} 1427 1428/* 1429 * bawrite: 1430 * 1431 * Asynchronous write. Start output on a buffer, but do not wait for 1432 * it to complete. The buffer is released when the output completes. 1433 * 1434 * bwrite() ( or the VOP routine anyway ) is responsible for handling 1435 * B_INVAL buffers. Not us. 1436 */ 1437void 1438bawrite(struct buf *bp) 1439{ 1440 1441 bp->b_flags |= B_ASYNC; 1442 (void) bwrite(bp); 1443} 1444 1445/* 1446 * babarrierwrite: 1447 * 1448 * Asynchronous barrier write. Start output on a buffer, but do not 1449 * wait for it to complete. Place a write barrier after this write so 1450 * that this buffer and all buffers written before it are committed to 1451 * the disk before any buffers written after this write are committed 1452 * to the disk. The buffer is released when the output completes. 1453 */ 1454void 1455babarrierwrite(struct buf *bp) 1456{ 1457 1458 bp->b_flags |= B_ASYNC | B_BARRIER; 1459 (void) bwrite(bp); 1460} 1461 1462/* 1463 * bbarrierwrite: 1464 * 1465 * Synchronous barrier write. Start output on a buffer and wait for 1466 * it to complete. Place a write barrier after this write so that 1467 * this buffer and all buffers written before it are committed to 1468 * the disk before any buffers written after this write are committed 1469 * to the disk. The buffer is released when the output completes. 1470 */ 1471int 1472bbarrierwrite(struct buf *bp) 1473{ 1474 1475 bp->b_flags |= B_BARRIER; 1476 return (bwrite(bp)); 1477} 1478 1479/* 1480 * bwillwrite: 1481 * 1482 * Called prior to the locking of any vnodes when we are expecting to 1483 * write. We do not want to starve the buffer cache with too many 1484 * dirty buffers so we block here. By blocking prior to the locking 1485 * of any vnodes we attempt to avoid the situation where a locked vnode 1486 * prevents the various system daemons from flushing related buffers. 1487 */ 1488void 1489bwillwrite(void) 1490{ 1491 1492 if (numdirtybuffers >= hidirtybuffers) { 1493 mtx_lock(&bdirtylock); 1494 while (numdirtybuffers >= hidirtybuffers) { 1495 bdirtywait = 1; 1496 msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4), 1497 "flswai", 0); 1498 } 1499 mtx_unlock(&bdirtylock); 1500 } 1501} 1502 1503/* 1504 * Return true if we have too many dirty buffers. 1505 */ 1506int 1507buf_dirty_count_severe(void) 1508{ 1509 1510 return(numdirtybuffers >= hidirtybuffers); 1511} 1512 1513static __noinline int 1514buf_vm_page_count_severe(void) 1515{ 1516 1517 KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1); 1518 1519 return vm_page_count_severe(); 1520} 1521 1522/* 1523 * brelse: 1524 * 1525 * Release a busy buffer and, if requested, free its resources. The 1526 * buffer will be stashed in the appropriate bufqueue[] allowing it 1527 * to be accessed later as a cache entity or reused for other purposes. 1528 */ 1529void 1530brelse(struct buf *bp) 1531{ 1532 int qindex; 1533 1534 CTR3(KTR_BUF, "brelse(%p) vp %p flags %X", 1535 bp, bp->b_vp, bp->b_flags); 1536 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1537 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1538 1539 if (BUF_LOCKRECURSED(bp)) { 1540 /* 1541 * Do not process, in particular, do not handle the 1542 * B_INVAL/B_RELBUF and do not release to free list. 1543 */ 1544 BUF_UNLOCK(bp); 1545 return; 1546 } 1547 1548 if (bp->b_flags & B_MANAGED) { 1549 bqrelse(bp); 1550 return; 1551 } 1552 1553 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) && 1554 bp->b_error == EIO && !(bp->b_flags & B_INVAL)) { 1555 /* 1556 * Failed write, redirty. Must clear BIO_ERROR to prevent 1557 * pages from being scrapped. If the error is anything 1558 * other than an I/O error (EIO), assume that retrying 1559 * is futile. 1560 */ 1561 bp->b_ioflags &= ~BIO_ERROR; 1562 bdirty(bp); 1563 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) || 1564 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) { 1565 /* 1566 * Either a failed I/O or we were asked to free or not 1567 * cache the buffer. 1568 */ 1569 bp->b_flags |= B_INVAL; 1570 if (!LIST_EMPTY(&bp->b_dep)) 1571 buf_deallocate(bp); 1572 if (bp->b_flags & B_DELWRI) 1573 bdirtysub(); 1574 bp->b_flags &= ~(B_DELWRI | B_CACHE); 1575 if ((bp->b_flags & B_VMIO) == 0) { 1576 if (bp->b_bufsize) 1577 allocbuf(bp, 0); 1578 if (bp->b_vp) 1579 brelvp(bp); 1580 } 1581 } 1582 1583 /* 1584 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release() 1585 * is called with B_DELWRI set, the underlying pages may wind up 1586 * getting freed causing a previous write (bdwrite()) to get 'lost' 1587 * because pages associated with a B_DELWRI bp are marked clean. 1588 * 1589 * We still allow the B_INVAL case to call vfs_vmio_release(), even 1590 * if B_DELWRI is set. 1591 * 1592 * If B_DELWRI is not set we may have to set B_RELBUF if we are low 1593 * on pages to return pages to the VM page queues. 1594 */ 1595 if (bp->b_flags & B_DELWRI) 1596 bp->b_flags &= ~B_RELBUF; 1597 else if (buf_vm_page_count_severe()) { 1598 /* 1599 * BKGRDINPROG can only be set with the buf and bufobj 1600 * locks both held. We tolerate a race to clear it here. 1601 */ 1602 if (!(bp->b_vflags & BV_BKGRDINPROG)) 1603 bp->b_flags |= B_RELBUF; 1604 } 1605 1606 /* 1607 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 1608 * constituted, not even NFS buffers now. Two flags effect this. If 1609 * B_INVAL, the struct buf is invalidated but the VM object is kept 1610 * around ( i.e. so it is trivial to reconstitute the buffer later ). 1611 * 1612 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be 1613 * invalidated. BIO_ERROR cannot be set for a failed write unless the 1614 * buffer is also B_INVAL because it hits the re-dirtying code above. 1615 * 1616 * Normally we can do this whether a buffer is B_DELWRI or not. If 1617 * the buffer is an NFS buffer, it is tracking piecemeal writes or 1618 * the commit state and we cannot afford to lose the buffer. If the 1619 * buffer has a background write in progress, we need to keep it 1620 * around to prevent it from being reconstituted and starting a second 1621 * background write. 1622 */ 1623 if ((bp->b_flags & B_VMIO) 1624 && !(bp->b_vp->v_mount != NULL && 1625 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 && 1626 !vn_isdisk(bp->b_vp, NULL) && 1627 (bp->b_flags & B_DELWRI)) 1628 ) { 1629 1630 int i, j, resid; 1631 vm_page_t m; 1632 off_t foff; 1633 vm_pindex_t poff; 1634 vm_object_t obj; 1635 1636 obj = bp->b_bufobj->bo_object; 1637 1638 /* 1639 * Get the base offset and length of the buffer. Note that 1640 * in the VMIO case if the buffer block size is not 1641 * page-aligned then b_data pointer may not be page-aligned. 1642 * But our b_pages[] array *IS* page aligned. 1643 * 1644 * block sizes less then DEV_BSIZE (usually 512) are not 1645 * supported due to the page granularity bits (m->valid, 1646 * m->dirty, etc...). 1647 * 1648 * See man buf(9) for more information 1649 */ 1650 resid = bp->b_bufsize; 1651 foff = bp->b_offset; 1652 for (i = 0; i < bp->b_npages; i++) { 1653 int had_bogus = 0; 1654 1655 m = bp->b_pages[i]; 1656 1657 /* 1658 * If we hit a bogus page, fixup *all* the bogus pages 1659 * now. 1660 */ 1661 if (m == bogus_page) { 1662 poff = OFF_TO_IDX(bp->b_offset); 1663 had_bogus = 1; 1664 1665 VM_OBJECT_RLOCK(obj); 1666 for (j = i; j < bp->b_npages; j++) { 1667 vm_page_t mtmp; 1668 mtmp = bp->b_pages[j]; 1669 if (mtmp == bogus_page) { 1670 mtmp = vm_page_lookup(obj, poff + j); 1671 if (!mtmp) { 1672 panic("brelse: page missing\n"); 1673 } 1674 bp->b_pages[j] = mtmp; 1675 } 1676 } 1677 VM_OBJECT_RUNLOCK(obj); 1678 1679 if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) { 1680 BUF_CHECK_MAPPED(bp); 1681 pmap_qenter( 1682 trunc_page((vm_offset_t)bp->b_data), 1683 bp->b_pages, bp->b_npages); 1684 } 1685 m = bp->b_pages[i]; 1686 } 1687 if ((bp->b_flags & B_NOCACHE) || 1688 (bp->b_ioflags & BIO_ERROR && 1689 bp->b_iocmd == BIO_READ)) { 1690 int poffset = foff & PAGE_MASK; 1691 int presid = resid > (PAGE_SIZE - poffset) ? 1692 (PAGE_SIZE - poffset) : resid; 1693 1694 KASSERT(presid >= 0, ("brelse: extra page")); 1695 VM_OBJECT_WLOCK(obj); 1696 while (vm_page_xbusied(m)) { 1697 vm_page_lock(m); 1698 VM_OBJECT_WUNLOCK(obj); 1699 vm_page_busy_sleep(m, "mbncsh"); 1700 VM_OBJECT_WLOCK(obj); 1701 } 1702 if (pmap_page_wired_mappings(m) == 0) 1703 vm_page_set_invalid(m, poffset, presid); 1704 VM_OBJECT_WUNLOCK(obj); 1705 if (had_bogus) 1706 printf("avoided corruption bug in bogus_page/brelse code\n"); 1707 } 1708 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1709 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 1710 } 1711 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1712 vfs_vmio_release(bp); 1713 1714 } else if (bp->b_flags & B_VMIO) { 1715 1716 if (bp->b_flags & (B_INVAL | B_RELBUF)) { 1717 vfs_vmio_release(bp); 1718 } 1719 1720 } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) { 1721 if (bp->b_bufsize != 0) 1722 allocbuf(bp, 0); 1723 if (bp->b_vp != NULL) 1724 brelvp(bp); 1725 } 1726 1727 /* 1728 * If the buffer has junk contents signal it and eventually 1729 * clean up B_DELWRI and diassociate the vnode so that gbincore() 1730 * doesn't find it. 1731 */ 1732 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 || 1733 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0) 1734 bp->b_flags |= B_INVAL; 1735 if (bp->b_flags & B_INVAL) { 1736 if (bp->b_flags & B_DELWRI) 1737 bundirty(bp); 1738 if (bp->b_vp) 1739 brelvp(bp); 1740 } 1741 1742 /* buffers with no memory */ 1743 if (bp->b_bufsize == 0) { 1744 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1745 if (bp->b_vflags & BV_BKGRDINPROG) 1746 panic("losing buffer 1"); 1747 if (bp->b_kvasize) 1748 qindex = QUEUE_EMPTYKVA; 1749 else 1750 qindex = QUEUE_EMPTY; 1751 bp->b_flags |= B_AGE; 1752 /* buffers with junk contents */ 1753 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || 1754 (bp->b_ioflags & BIO_ERROR)) { 1755 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1756 if (bp->b_vflags & BV_BKGRDINPROG) 1757 panic("losing buffer 2"); 1758 qindex = QUEUE_CLEAN; 1759 bp->b_flags |= B_AGE; 1760 /* remaining buffers */ 1761 } else if (bp->b_flags & B_DELWRI) 1762 qindex = QUEUE_DIRTY; 1763 else 1764 qindex = QUEUE_CLEAN; 1765 1766 binsfree(bp, qindex); 1767 1768 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT); 1769 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) 1770 panic("brelse: not dirty"); 1771 /* unlock */ 1772 BUF_UNLOCK(bp); 1773} 1774 1775/* 1776 * Release a buffer back to the appropriate queue but do not try to free 1777 * it. The buffer is expected to be used again soon. 1778 * 1779 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1780 * biodone() to requeue an async I/O on completion. It is also used when 1781 * known good buffers need to be requeued but we think we may need the data 1782 * again soon. 1783 * 1784 * XXX we should be able to leave the B_RELBUF hint set on completion. 1785 */ 1786void 1787bqrelse(struct buf *bp) 1788{ 1789 int qindex; 1790 1791 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1792 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1793 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1794 1795 if (BUF_LOCKRECURSED(bp)) { 1796 /* do not release to free list */ 1797 BUF_UNLOCK(bp); 1798 return; 1799 } 1800 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1801 1802 if (bp->b_flags & B_MANAGED) { 1803 if (bp->b_flags & B_REMFREE) 1804 bremfreef(bp); 1805 goto out; 1806 } 1807 1808 /* buffers with stale but valid contents */ 1809 if (bp->b_flags & B_DELWRI) { 1810 qindex = QUEUE_DIRTY; 1811 } else { 1812 if ((bp->b_flags & B_DELWRI) == 0 && 1813 (bp->b_xflags & BX_VNDIRTY)) 1814 panic("bqrelse: not dirty"); 1815 /* 1816 * BKGRDINPROG can only be set with the buf and bufobj 1817 * locks both held. We tolerate a race to clear it here. 1818 */ 1819 if (buf_vm_page_count_severe() && 1820 (bp->b_vflags & BV_BKGRDINPROG) == 0) { 1821 /* 1822 * We are too low on memory, we have to try to free 1823 * the buffer (most importantly: the wired pages 1824 * making up its backing store) *now*. 1825 */ 1826 brelse(bp); 1827 return; 1828 } 1829 qindex = QUEUE_CLEAN; 1830 } 1831 binsfree(bp, qindex); 1832 1833out: 1834 /* unlock */ 1835 BUF_UNLOCK(bp); 1836} 1837 1838/* Give pages used by the bp back to the VM system (where possible) */ 1839static void 1840vfs_vmio_release(struct buf *bp) 1841{ 1842 int i; 1843 vm_page_t m; 1844 1845 if ((bp->b_flags & B_UNMAPPED) == 0) { 1846 BUF_CHECK_MAPPED(bp); 1847 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages); 1848 } else 1849 BUF_CHECK_UNMAPPED(bp); 1850 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 1851 for (i = 0; i < bp->b_npages; i++) { 1852 m = bp->b_pages[i]; 1853 bp->b_pages[i] = NULL; 1854 /* 1855 * In order to keep page LRU ordering consistent, put 1856 * everything on the inactive queue. 1857 */ 1858 vm_page_lock(m); 1859 vm_page_unwire(m, 0); 1860 1861 /* 1862 * Might as well free the page if we can and it has 1863 * no valid data. We also free the page if the 1864 * buffer was used for direct I/O 1865 */ 1866 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) { 1867 if (m->wire_count == 0 && !vm_page_busied(m)) 1868 vm_page_free(m); 1869 } else if (bp->b_flags & B_DIRECT) 1870 vm_page_try_to_free(m); 1871 else if (buf_vm_page_count_severe()) 1872 vm_page_try_to_cache(m); 1873 vm_page_unlock(m); 1874 } 1875 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 1876 1877 if (bp->b_bufsize) { 1878 bufspacewakeup(); 1879 bp->b_bufsize = 0; 1880 } 1881 bp->b_npages = 0; 1882 bp->b_flags &= ~B_VMIO; 1883 if (bp->b_vp) 1884 brelvp(bp); 1885} 1886 1887/* 1888 * Check to see if a block at a particular lbn is available for a clustered 1889 * write. 1890 */ 1891static int 1892vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno) 1893{ 1894 struct buf *bpa; 1895 int match; 1896 1897 match = 0; 1898 1899 /* If the buf isn't in core skip it */ 1900 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL) 1901 return (0); 1902 1903 /* If the buf is busy we don't want to wait for it */ 1904 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 1905 return (0); 1906 1907 /* Only cluster with valid clusterable delayed write buffers */ 1908 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) != 1909 (B_DELWRI | B_CLUSTEROK)) 1910 goto done; 1911 1912 if (bpa->b_bufsize != size) 1913 goto done; 1914 1915 /* 1916 * Check to see if it is in the expected place on disk and that the 1917 * block has been mapped. 1918 */ 1919 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno)) 1920 match = 1; 1921done: 1922 BUF_UNLOCK(bpa); 1923 return (match); 1924} 1925 1926/* 1927 * vfs_bio_awrite: 1928 * 1929 * Implement clustered async writes for clearing out B_DELWRI buffers. 1930 * This is much better then the old way of writing only one buffer at 1931 * a time. Note that we may not be presented with the buffers in the 1932 * correct order, so we search for the cluster in both directions. 1933 */ 1934int 1935vfs_bio_awrite(struct buf *bp) 1936{ 1937 struct bufobj *bo; 1938 int i; 1939 int j; 1940 daddr_t lblkno = bp->b_lblkno; 1941 struct vnode *vp = bp->b_vp; 1942 int ncl; 1943 int nwritten; 1944 int size; 1945 int maxcl; 1946 int gbflags; 1947 1948 bo = &vp->v_bufobj; 1949 gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0; 1950 /* 1951 * right now we support clustered writing only to regular files. If 1952 * we find a clusterable block we could be in the middle of a cluster 1953 * rather then at the beginning. 1954 */ 1955 if ((vp->v_type == VREG) && 1956 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1957 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1958 1959 size = vp->v_mount->mnt_stat.f_iosize; 1960 maxcl = MAXPHYS / size; 1961 1962 BO_RLOCK(bo); 1963 for (i = 1; i < maxcl; i++) 1964 if (vfs_bio_clcheck(vp, size, lblkno + i, 1965 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0) 1966 break; 1967 1968 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 1969 if (vfs_bio_clcheck(vp, size, lblkno - j, 1970 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0) 1971 break; 1972 BO_RUNLOCK(bo); 1973 --j; 1974 ncl = i + j; 1975 /* 1976 * this is a possible cluster write 1977 */ 1978 if (ncl != 1) { 1979 BUF_UNLOCK(bp); 1980 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl, 1981 gbflags); 1982 return (nwritten); 1983 } 1984 } 1985 bremfree(bp); 1986 bp->b_flags |= B_ASYNC; 1987 /* 1988 * default (old) behavior, writing out only one block 1989 * 1990 * XXX returns b_bufsize instead of b_bcount for nwritten? 1991 */ 1992 nwritten = bp->b_bufsize; 1993 (void) bwrite(bp); 1994 1995 return (nwritten); 1996} 1997 1998static void 1999setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags) 2000{ 2001 2002 KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2003 bp->b_kvasize == 0, ("call bfreekva(%p)", bp)); 2004 if ((gbflags & GB_UNMAPPED) == 0) { 2005 bp->b_kvabase = (caddr_t)addr; 2006 } else if ((gbflags & GB_KVAALLOC) != 0) { 2007 KASSERT((gbflags & GB_UNMAPPED) != 0, 2008 ("GB_KVAALLOC without GB_UNMAPPED")); 2009 bp->b_kvaalloc = (caddr_t)addr; 2010 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2011 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2012 } 2013 bp->b_kvasize = maxsize; 2014} 2015 2016/* 2017 * Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if 2018 * needed. 2019 */ 2020static int 2021allocbufkva(struct buf *bp, int maxsize, int gbflags) 2022{ 2023 vm_offset_t addr; 2024 2025 bfreekva(bp); 2026 addr = 0; 2027 2028 if (vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr)) { 2029 /* 2030 * Buffer map is too fragmented. Request the caller 2031 * to defragment the map. 2032 */ 2033 atomic_add_int(&bufdefragcnt, 1); 2034 return (1); 2035 } 2036 setbufkva(bp, addr, maxsize, gbflags); 2037 atomic_add_long(&bufspace, bp->b_kvasize); 2038 return (0); 2039} 2040 2041/* 2042 * Ask the bufdaemon for help, or act as bufdaemon itself, when a 2043 * locked vnode is supplied. 2044 */ 2045static void 2046getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo, 2047 int defrag) 2048{ 2049 struct thread *td; 2050 char *waitmsg; 2051 int cnt, error, flags, norunbuf, wait; 2052 2053 mtx_assert(&bqclean, MA_OWNED); 2054 2055 if (defrag) { 2056 flags = VFS_BIO_NEED_BUFSPACE; 2057 waitmsg = "nbufkv"; 2058 } else if (bufspace >= hibufspace) { 2059 waitmsg = "nbufbs"; 2060 flags = VFS_BIO_NEED_BUFSPACE; 2061 } else { 2062 waitmsg = "newbuf"; 2063 flags = VFS_BIO_NEED_ANY; 2064 } 2065 mtx_lock(&nblock); 2066 needsbuffer |= flags; 2067 mtx_unlock(&nblock); 2068 mtx_unlock(&bqclean); 2069 2070 bd_speedup(); /* heeeelp */ 2071 if ((gbflags & GB_NOWAIT_BD) != 0) 2072 return; 2073 2074 td = curthread; 2075 cnt = 0; 2076 wait = MNT_NOWAIT; 2077 mtx_lock(&nblock); 2078 while (needsbuffer & flags) { 2079 if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) { 2080 mtx_unlock(&nblock); 2081 2082 /* 2083 * getblk() is called with a vnode locked, and 2084 * some majority of the dirty buffers may as 2085 * well belong to the vnode. Flushing the 2086 * buffers there would make a progress that 2087 * cannot be achieved by the buf_daemon, that 2088 * cannot lock the vnode. 2089 */ 2090 if (cnt++ > 2) 2091 wait = MNT_WAIT; 2092 ASSERT_VOP_LOCKED(vp, "bufd_helper"); 2093 error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 : 2094 vn_lock(vp, LK_TRYUPGRADE); 2095 if (error == 0) { 2096 /* play bufdaemon */ 2097 norunbuf = curthread_pflags_set(TDP_BUFNEED | 2098 TDP_NORUNNINGBUF); 2099 VOP_FSYNC(vp, wait, td); 2100 atomic_add_long(¬bufdflushes, 1); 2101 curthread_pflags_restore(norunbuf); 2102 } 2103 mtx_lock(&nblock); 2104 if ((needsbuffer & flags) == 0) 2105 break; 2106 } 2107 if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag, 2108 waitmsg, slptimeo)) 2109 break; 2110 } 2111 mtx_unlock(&nblock); 2112} 2113 2114static void 2115getnewbuf_reuse_bp(struct buf *bp, int qindex) 2116{ 2117 2118 CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d " 2119 "queue %d (recycling)", bp, bp->b_vp, bp->b_flags, 2120 bp->b_kvasize, bp->b_bufsize, qindex); 2121 mtx_assert(&bqclean, MA_NOTOWNED); 2122 2123 /* 2124 * Note: we no longer distinguish between VMIO and non-VMIO 2125 * buffers. 2126 */ 2127 KASSERT((bp->b_flags & B_DELWRI) == 0, 2128 ("delwri buffer %p found in queue %d", bp, qindex)); 2129 2130 if (qindex == QUEUE_CLEAN) { 2131 if (bp->b_flags & B_VMIO) { 2132 bp->b_flags &= ~B_ASYNC; 2133 vfs_vmio_release(bp); 2134 } 2135 if (bp->b_vp != NULL) 2136 brelvp(bp); 2137 } 2138 2139 /* 2140 * Get the rest of the buffer freed up. b_kva* is still valid 2141 * after this operation. 2142 */ 2143 2144 if (bp->b_rcred != NOCRED) { 2145 crfree(bp->b_rcred); 2146 bp->b_rcred = NOCRED; 2147 } 2148 if (bp->b_wcred != NOCRED) { 2149 crfree(bp->b_wcred); 2150 bp->b_wcred = NOCRED; 2151 } 2152 if (!LIST_EMPTY(&bp->b_dep)) 2153 buf_deallocate(bp); 2154 if (bp->b_vflags & BV_BKGRDINPROG) 2155 panic("losing buffer 3"); 2156 KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p. qindex: %d", 2157 bp, bp->b_vp, qindex)); 2158 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0, 2159 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags)); 2160 2161 if (bp->b_bufsize) 2162 allocbuf(bp, 0); 2163 2164 bp->b_flags &= B_UNMAPPED | B_KVAALLOC; 2165 bp->b_ioflags = 0; 2166 bp->b_xflags = 0; 2167 KASSERT((bp->b_flags & B_INFREECNT) == 0, 2168 ("buf %p still counted as free?", bp)); 2169 bp->b_vflags = 0; 2170 bp->b_vp = NULL; 2171 bp->b_blkno = bp->b_lblkno = 0; 2172 bp->b_offset = NOOFFSET; 2173 bp->b_iodone = 0; 2174 bp->b_error = 0; 2175 bp->b_resid = 0; 2176 bp->b_bcount = 0; 2177 bp->b_npages = 0; 2178 bp->b_dirtyoff = bp->b_dirtyend = 0; 2179 bp->b_bufobj = NULL; 2180 bp->b_pin_count = 0; 2181 bp->b_fsprivate1 = NULL; 2182 bp->b_fsprivate2 = NULL; 2183 bp->b_fsprivate3 = NULL; 2184 2185 LIST_INIT(&bp->b_dep); 2186} 2187 2188static int flushingbufs; 2189 2190static struct buf * 2191getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata) 2192{ 2193 struct buf *bp, *nbp; 2194 int nqindex, qindex, pass; 2195 2196 KASSERT(!unmapped || !defrag, ("both unmapped and defrag")); 2197 2198 pass = 1; 2199restart: 2200 atomic_add_int(&getnewbufrestarts, 1); 2201 2202 /* 2203 * Setup for scan. If we do not have enough free buffers, 2204 * we setup a degenerate case that immediately fails. Note 2205 * that if we are specially marked process, we are allowed to 2206 * dip into our reserves. 2207 * 2208 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 2209 * for the allocation of the mapped buffer. For unmapped, the 2210 * easiest is to start with EMPTY outright. 2211 * 2212 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 2213 * However, there are a number of cases (defragging, reusing, ...) 2214 * where we cannot backup. 2215 */ 2216 nbp = NULL; 2217 mtx_lock(&bqclean); 2218 if (!defrag && unmapped) { 2219 nqindex = QUEUE_EMPTY; 2220 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2221 } 2222 if (nbp == NULL) { 2223 nqindex = QUEUE_EMPTYKVA; 2224 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2225 } 2226 2227 /* 2228 * If no EMPTYKVA buffers and we are either defragging or 2229 * reusing, locate a CLEAN buffer to free or reuse. If 2230 * bufspace useage is low skip this step so we can allocate a 2231 * new buffer. 2232 */ 2233 if (nbp == NULL && (defrag || bufspace >= lobufspace)) { 2234 nqindex = QUEUE_CLEAN; 2235 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2236 } 2237 2238 /* 2239 * If we could not find or were not allowed to reuse a CLEAN 2240 * buffer, check to see if it is ok to use an EMPTY buffer. 2241 * We can only use an EMPTY buffer if allocating its KVA would 2242 * not otherwise run us out of buffer space. No KVA is needed 2243 * for the unmapped allocation. 2244 */ 2245 if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace || 2246 metadata)) { 2247 nqindex = QUEUE_EMPTY; 2248 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2249 } 2250 2251 /* 2252 * All available buffers might be clean, retry ignoring the 2253 * lobufspace as the last resort. 2254 */ 2255 if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) { 2256 nqindex = QUEUE_CLEAN; 2257 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2258 } 2259 2260 /* 2261 * Run scan, possibly freeing data and/or kva mappings on the fly 2262 * depending. 2263 */ 2264 while ((bp = nbp) != NULL) { 2265 qindex = nqindex; 2266 2267 /* 2268 * Calculate next bp (we can only use it if we do not 2269 * block or do other fancy things). 2270 */ 2271 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 2272 switch (qindex) { 2273 case QUEUE_EMPTY: 2274 nqindex = QUEUE_EMPTYKVA; 2275 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2276 if (nbp != NULL) 2277 break; 2278 /* FALLTHROUGH */ 2279 case QUEUE_EMPTYKVA: 2280 nqindex = QUEUE_CLEAN; 2281 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2282 if (nbp != NULL) 2283 break; 2284 /* FALLTHROUGH */ 2285 case QUEUE_CLEAN: 2286 if (metadata && pass == 1) { 2287 pass = 2; 2288 nqindex = QUEUE_EMPTY; 2289 nbp = TAILQ_FIRST( 2290 &bufqueues[QUEUE_EMPTY]); 2291 } 2292 /* 2293 * nbp is NULL. 2294 */ 2295 break; 2296 } 2297 } 2298 /* 2299 * If we are defragging then we need a buffer with 2300 * b_kvasize != 0. XXX this situation should no longer 2301 * occur, if defrag is non-zero the buffer's b_kvasize 2302 * should also be non-zero at this point. XXX 2303 */ 2304 if (defrag && bp->b_kvasize == 0) { 2305 printf("Warning: defrag empty buffer %p\n", bp); 2306 continue; 2307 } 2308 2309 /* 2310 * Start freeing the bp. This is somewhat involved. nbp 2311 * remains valid only for QUEUE_EMPTY[KVA] bp's. 2312 */ 2313 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2314 continue; 2315 /* 2316 * BKGRDINPROG can only be set with the buf and bufobj 2317 * locks both held. We tolerate a race to clear it here. 2318 */ 2319 if (bp->b_vflags & BV_BKGRDINPROG) { 2320 BUF_UNLOCK(bp); 2321 continue; 2322 } 2323 2324 KASSERT(bp->b_qindex == qindex, 2325 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 2326 2327 bremfreel(bp); 2328 mtx_unlock(&bqclean); 2329 /* 2330 * NOTE: nbp is now entirely invalid. We can only restart 2331 * the scan from this point on. 2332 */ 2333 2334 getnewbuf_reuse_bp(bp, qindex); 2335 mtx_assert(&bqclean, MA_NOTOWNED); 2336 2337 /* 2338 * If we are defragging then free the buffer. 2339 */ 2340 if (defrag) { 2341 bp->b_flags |= B_INVAL; 2342 bfreekva(bp); 2343 brelse(bp); 2344 defrag = 0; 2345 goto restart; 2346 } 2347 2348 /* 2349 * Notify any waiters for the buffer lock about 2350 * identity change by freeing the buffer. 2351 */ 2352 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) { 2353 bp->b_flags |= B_INVAL; 2354 bfreekva(bp); 2355 brelse(bp); 2356 goto restart; 2357 } 2358 2359 if (metadata) 2360 break; 2361 2362 /* 2363 * If we are overcomitted then recover the buffer and its 2364 * KVM space. This occurs in rare situations when multiple 2365 * processes are blocked in getnewbuf() or allocbuf(). 2366 */ 2367 if (bufspace >= hibufspace) 2368 flushingbufs = 1; 2369 if (flushingbufs && bp->b_kvasize != 0) { 2370 bp->b_flags |= B_INVAL; 2371 bfreekva(bp); 2372 brelse(bp); 2373 goto restart; 2374 } 2375 if (bufspace < lobufspace) 2376 flushingbufs = 0; 2377 break; 2378 } 2379 return (bp); 2380} 2381 2382/* 2383 * getnewbuf: 2384 * 2385 * Find and initialize a new buffer header, freeing up existing buffers 2386 * in the bufqueues as necessary. The new buffer is returned locked. 2387 * 2388 * Important: B_INVAL is not set. If the caller wishes to throw the 2389 * buffer away, the caller must set B_INVAL prior to calling brelse(). 2390 * 2391 * We block if: 2392 * We have insufficient buffer headers 2393 * We have insufficient buffer space 2394 * buffer_arena is too fragmented ( space reservation fails ) 2395 * If we have to flush dirty buffers ( but we try to avoid this ) 2396 */ 2397static struct buf * 2398getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize, 2399 int gbflags) 2400{ 2401 struct buf *bp; 2402 int defrag, metadata; 2403 2404 KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 2405 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 2406 if (!unmapped_buf_allowed) 2407 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC); 2408 2409 defrag = 0; 2410 if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 || 2411 vp->v_type == VCHR) 2412 metadata = 1; 2413 else 2414 metadata = 0; 2415 /* 2416 * We can't afford to block since we might be holding a vnode lock, 2417 * which may prevent system daemons from running. We deal with 2418 * low-memory situations by proactively returning memory and running 2419 * async I/O rather then sync I/O. 2420 */ 2421 atomic_add_int(&getnewbufcalls, 1); 2422 atomic_subtract_int(&getnewbufrestarts, 1); 2423restart: 2424 bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED | 2425 GB_KVAALLOC)) == GB_UNMAPPED, metadata); 2426 if (bp != NULL) 2427 defrag = 0; 2428 2429 /* 2430 * If we exhausted our list, sleep as appropriate. We may have to 2431 * wakeup various daemons and write out some dirty buffers. 2432 * 2433 * Generally we are sleeping due to insufficient buffer space. 2434 */ 2435 if (bp == NULL) { 2436 mtx_assert(&bqclean, MA_OWNED); 2437 getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag); 2438 mtx_assert(&bqclean, MA_NOTOWNED); 2439 } else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) { 2440 mtx_assert(&bqclean, MA_NOTOWNED); 2441 2442 bfreekva(bp); 2443 bp->b_flags |= B_UNMAPPED; 2444 bp->b_kvabase = bp->b_data = unmapped_buf; 2445 bp->b_kvasize = maxsize; 2446 atomic_add_long(&bufspace, bp->b_kvasize); 2447 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2448 atomic_add_int(&bufreusecnt, 1); 2449 } else { 2450 mtx_assert(&bqclean, MA_NOTOWNED); 2451 2452 /* 2453 * We finally have a valid bp. We aren't quite out of the 2454 * woods, we still have to reserve kva space. In order 2455 * to keep fragmentation sane we only allocate kva in 2456 * BKVASIZE chunks. 2457 */ 2458 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 2459 2460 if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED | 2461 B_KVAALLOC)) == B_UNMAPPED) { 2462 if (allocbufkva(bp, maxsize, gbflags)) { 2463 defrag = 1; 2464 bp->b_flags |= B_INVAL; 2465 brelse(bp); 2466 goto restart; 2467 } 2468 atomic_add_int(&bufreusecnt, 1); 2469 } else if ((bp->b_flags & B_KVAALLOC) != 0 && 2470 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) { 2471 /* 2472 * If the reused buffer has KVA allocated, 2473 * reassign b_kvaalloc to b_kvabase. 2474 */ 2475 bp->b_kvabase = bp->b_kvaalloc; 2476 bp->b_flags &= ~B_KVAALLOC; 2477 atomic_subtract_long(&unmapped_bufspace, 2478 bp->b_kvasize); 2479 atomic_add_int(&bufreusecnt, 1); 2480 } else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2481 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED | 2482 GB_KVAALLOC)) { 2483 /* 2484 * The case of reused buffer already have KVA 2485 * mapped, but the request is for unmapped 2486 * buffer with KVA allocated. 2487 */ 2488 bp->b_kvaalloc = bp->b_kvabase; 2489 bp->b_data = bp->b_kvabase = unmapped_buf; 2490 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2491 atomic_add_long(&unmapped_bufspace, 2492 bp->b_kvasize); 2493 atomic_add_int(&bufreusecnt, 1); 2494 } 2495 if ((gbflags & GB_UNMAPPED) == 0) { 2496 bp->b_saveaddr = bp->b_kvabase; 2497 bp->b_data = bp->b_saveaddr; 2498 bp->b_flags &= ~B_UNMAPPED; 2499 BUF_CHECK_MAPPED(bp); 2500 } 2501 } 2502 return (bp); 2503} 2504 2505/* 2506 * buf_daemon: 2507 * 2508 * buffer flushing daemon. Buffers are normally flushed by the 2509 * update daemon but if it cannot keep up this process starts to 2510 * take the load in an attempt to prevent getnewbuf() from blocking. 2511 */ 2512 2513static struct kproc_desc buf_kp = { 2514 "bufdaemon", 2515 buf_daemon, 2516 &bufdaemonproc 2517}; 2518SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp); 2519 2520static int 2521buf_flush(int target) 2522{ 2523 int flushed; 2524 2525 flushed = flushbufqueues(target, 0); 2526 if (flushed == 0) { 2527 /* 2528 * Could not find any buffers without rollback 2529 * dependencies, so just write the first one 2530 * in the hopes of eventually making progress. 2531 */ 2532 flushed = flushbufqueues(target, 1); 2533 } 2534 return (flushed); 2535} 2536 2537static void 2538buf_daemon() 2539{ 2540 int lodirty; 2541 2542 /* 2543 * This process needs to be suspended prior to shutdown sync. 2544 */ 2545 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc, 2546 SHUTDOWN_PRI_LAST); 2547 2548 /* 2549 * This process is allowed to take the buffer cache to the limit 2550 */ 2551 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED; 2552 mtx_lock(&bdlock); 2553 for (;;) { 2554 bd_request = 0; 2555 mtx_unlock(&bdlock); 2556 2557 kproc_suspend_check(bufdaemonproc); 2558 lodirty = lodirtybuffers; 2559 if (bd_speedupreq) { 2560 lodirty = numdirtybuffers / 2; 2561 bd_speedupreq = 0; 2562 } 2563 /* 2564 * Do the flush. Limit the amount of in-transit I/O we 2565 * allow to build up, otherwise we would completely saturate 2566 * the I/O system. 2567 */ 2568 while (numdirtybuffers > lodirty) { 2569 if (buf_flush(numdirtybuffers - lodirty) == 0) 2570 break; 2571 kern_yield(PRI_USER); 2572 } 2573 2574 /* 2575 * Only clear bd_request if we have reached our low water 2576 * mark. The buf_daemon normally waits 1 second and 2577 * then incrementally flushes any dirty buffers that have 2578 * built up, within reason. 2579 * 2580 * If we were unable to hit our low water mark and couldn't 2581 * find any flushable buffers, we sleep for a short period 2582 * to avoid endless loops on unlockable buffers. 2583 */ 2584 mtx_lock(&bdlock); 2585 if (numdirtybuffers <= lodirtybuffers) { 2586 /* 2587 * We reached our low water mark, reset the 2588 * request and sleep until we are needed again. 2589 * The sleep is just so the suspend code works. 2590 */ 2591 bd_request = 0; 2592 /* 2593 * Do an extra wakeup in case dirty threshold 2594 * changed via sysctl and the explicit transition 2595 * out of shortfall was missed. 2596 */ 2597 bdirtywakeup(); 2598 if (runningbufspace <= lorunningspace) 2599 runningwakeup(); 2600 msleep(&bd_request, &bdlock, PVM, "psleep", hz); 2601 } else { 2602 /* 2603 * We couldn't find any flushable dirty buffers but 2604 * still have too many dirty buffers, we 2605 * have to sleep and try again. (rare) 2606 */ 2607 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10); 2608 } 2609 } 2610} 2611 2612/* 2613 * flushbufqueues: 2614 * 2615 * Try to flush a buffer in the dirty queue. We must be careful to 2616 * free up B_INVAL buffers instead of write them, which NFS is 2617 * particularly sensitive to. 2618 */ 2619static int flushwithdeps = 0; 2620SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps, 2621 0, "Number of buffers flushed with dependecies that require rollbacks"); 2622 2623static int 2624flushbufqueues(int target, int flushdeps) 2625{ 2626 struct buf *sentinel; 2627 struct vnode *vp; 2628 struct mount *mp; 2629 struct buf *bp; 2630 int hasdeps; 2631 int flushed; 2632 int queue; 2633 int error; 2634 2635 flushed = 0; 2636 queue = QUEUE_DIRTY; 2637 bp = NULL; 2638 sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO); 2639 sentinel->b_qindex = QUEUE_SENTINEL; 2640 mtx_lock(&bqdirty); 2641 TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist); 2642 mtx_unlock(&bqdirty); 2643 while (flushed != target) { 2644 maybe_yield(); 2645 mtx_lock(&bqdirty); 2646 bp = TAILQ_NEXT(sentinel, b_freelist); 2647 if (bp != NULL) { 2648 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2649 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel, 2650 b_freelist); 2651 } else { 2652 mtx_unlock(&bqdirty); 2653 break; 2654 } 2655 KASSERT(bp->b_qindex != QUEUE_SENTINEL, 2656 ("parallel calls to flushbufqueues() bp %p", bp)); 2657 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL); 2658 mtx_unlock(&bqdirty); 2659 if (error != 0) 2660 continue; 2661 if (bp->b_pin_count > 0) { 2662 BUF_UNLOCK(bp); 2663 continue; 2664 } 2665 /* 2666 * BKGRDINPROG can only be set with the buf and bufobj 2667 * locks both held. We tolerate a race to clear it here. 2668 */ 2669 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 || 2670 (bp->b_flags & B_DELWRI) == 0) { 2671 BUF_UNLOCK(bp); 2672 continue; 2673 } 2674 if (bp->b_flags & B_INVAL) { 2675 bremfreef(bp); 2676 brelse(bp); 2677 flushed++; 2678 continue; 2679 } 2680 2681 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) { 2682 if (flushdeps == 0) { 2683 BUF_UNLOCK(bp); 2684 continue; 2685 } 2686 hasdeps = 1; 2687 } else 2688 hasdeps = 0; 2689 /* 2690 * We must hold the lock on a vnode before writing 2691 * one of its buffers. Otherwise we may confuse, or 2692 * in the case of a snapshot vnode, deadlock the 2693 * system. 2694 * 2695 * The lock order here is the reverse of the normal 2696 * of vnode followed by buf lock. This is ok because 2697 * the NOWAIT will prevent deadlock. 2698 */ 2699 vp = bp->b_vp; 2700 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2701 BUF_UNLOCK(bp); 2702 continue; 2703 } 2704 error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT); 2705 if (error == 0) { 2706 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X", 2707 bp, bp->b_vp, bp->b_flags); 2708 vfs_bio_awrite(bp); 2709 vn_finished_write(mp); 2710 VOP_UNLOCK(vp, 0); 2711 flushwithdeps += hasdeps; 2712 flushed++; 2713 if (runningbufspace > hirunningspace) 2714 waitrunningbufspace(); 2715 continue; 2716 } 2717 vn_finished_write(mp); 2718 BUF_UNLOCK(bp); 2719 } 2720 mtx_lock(&bqdirty); 2721 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2722 mtx_unlock(&bqdirty); 2723 free(sentinel, M_TEMP); 2724 return (flushed); 2725} 2726 2727/* 2728 * Check to see if a block is currently memory resident. 2729 */ 2730struct buf * 2731incore(struct bufobj *bo, daddr_t blkno) 2732{ 2733 struct buf *bp; 2734 2735 BO_RLOCK(bo); 2736 bp = gbincore(bo, blkno); 2737 BO_RUNLOCK(bo); 2738 return (bp); 2739} 2740 2741/* 2742 * Returns true if no I/O is needed to access the 2743 * associated VM object. This is like incore except 2744 * it also hunts around in the VM system for the data. 2745 */ 2746 2747static int 2748inmem(struct vnode * vp, daddr_t blkno) 2749{ 2750 vm_object_t obj; 2751 vm_offset_t toff, tinc, size; 2752 vm_page_t m; 2753 vm_ooffset_t off; 2754 2755 ASSERT_VOP_LOCKED(vp, "inmem"); 2756 2757 if (incore(&vp->v_bufobj, blkno)) 2758 return 1; 2759 if (vp->v_mount == NULL) 2760 return 0; 2761 obj = vp->v_object; 2762 if (obj == NULL) 2763 return (0); 2764 2765 size = PAGE_SIZE; 2766 if (size > vp->v_mount->mnt_stat.f_iosize) 2767 size = vp->v_mount->mnt_stat.f_iosize; 2768 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 2769 2770 VM_OBJECT_RLOCK(obj); 2771 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2772 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 2773 if (!m) 2774 goto notinmem; 2775 tinc = size; 2776 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 2777 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 2778 if (vm_page_is_valid(m, 2779 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 2780 goto notinmem; 2781 } 2782 VM_OBJECT_RUNLOCK(obj); 2783 return 1; 2784 2785notinmem: 2786 VM_OBJECT_RUNLOCK(obj); 2787 return (0); 2788} 2789 2790/* 2791 * Set the dirty range for a buffer based on the status of the dirty 2792 * bits in the pages comprising the buffer. The range is limited 2793 * to the size of the buffer. 2794 * 2795 * Tell the VM system that the pages associated with this buffer 2796 * are clean. This is used for delayed writes where the data is 2797 * going to go to disk eventually without additional VM intevention. 2798 * 2799 * Note that while we only really need to clean through to b_bcount, we 2800 * just go ahead and clean through to b_bufsize. 2801 */ 2802static void 2803vfs_clean_pages_dirty_buf(struct buf *bp) 2804{ 2805 vm_ooffset_t foff, noff, eoff; 2806 vm_page_t m; 2807 int i; 2808 2809 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0) 2810 return; 2811 2812 foff = bp->b_offset; 2813 KASSERT(bp->b_offset != NOOFFSET, 2814 ("vfs_clean_pages_dirty_buf: no buffer offset")); 2815 2816 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 2817 vfs_drain_busy_pages(bp); 2818 vfs_setdirty_locked_object(bp); 2819 for (i = 0; i < bp->b_npages; i++) { 2820 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 2821 eoff = noff; 2822 if (eoff > bp->b_offset + bp->b_bufsize) 2823 eoff = bp->b_offset + bp->b_bufsize; 2824 m = bp->b_pages[i]; 2825 vfs_page_set_validclean(bp, foff, m); 2826 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 2827 foff = noff; 2828 } 2829 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 2830} 2831 2832static void 2833vfs_setdirty_locked_object(struct buf *bp) 2834{ 2835 vm_object_t object; 2836 int i; 2837 2838 object = bp->b_bufobj->bo_object; 2839 VM_OBJECT_ASSERT_WLOCKED(object); 2840 2841 /* 2842 * We qualify the scan for modified pages on whether the 2843 * object has been flushed yet. 2844 */ 2845 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) { 2846 vm_offset_t boffset; 2847 vm_offset_t eoffset; 2848 2849 /* 2850 * test the pages to see if they have been modified directly 2851 * by users through the VM system. 2852 */ 2853 for (i = 0; i < bp->b_npages; i++) 2854 vm_page_test_dirty(bp->b_pages[i]); 2855 2856 /* 2857 * Calculate the encompassing dirty range, boffset and eoffset, 2858 * (eoffset - boffset) bytes. 2859 */ 2860 2861 for (i = 0; i < bp->b_npages; i++) { 2862 if (bp->b_pages[i]->dirty) 2863 break; 2864 } 2865 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2866 2867 for (i = bp->b_npages - 1; i >= 0; --i) { 2868 if (bp->b_pages[i]->dirty) { 2869 break; 2870 } 2871 } 2872 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2873 2874 /* 2875 * Fit it to the buffer. 2876 */ 2877 2878 if (eoffset > bp->b_bcount) 2879 eoffset = bp->b_bcount; 2880 2881 /* 2882 * If we have a good dirty range, merge with the existing 2883 * dirty range. 2884 */ 2885 2886 if (boffset < eoffset) { 2887 if (bp->b_dirtyoff > boffset) 2888 bp->b_dirtyoff = boffset; 2889 if (bp->b_dirtyend < eoffset) 2890 bp->b_dirtyend = eoffset; 2891 } 2892 } 2893} 2894 2895/* 2896 * Allocate the KVA mapping for an existing buffer. It handles the 2897 * cases of both B_UNMAPPED buffer, and buffer with the preallocated 2898 * KVA which is not mapped (B_KVAALLOC). 2899 */ 2900static void 2901bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags) 2902{ 2903 struct buf *scratch_bp; 2904 int bsize, maxsize, need_mapping, need_kva; 2905 off_t offset; 2906 2907 need_mapping = (bp->b_flags & B_UNMAPPED) != 0 && 2908 (gbflags & GB_UNMAPPED) == 0; 2909 need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED && 2910 (gbflags & GB_KVAALLOC) != 0; 2911 if (!need_mapping && !need_kva) 2912 return; 2913 2914 BUF_CHECK_UNMAPPED(bp); 2915 2916 if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) { 2917 /* 2918 * Buffer is not mapped, but the KVA was already 2919 * reserved at the time of the instantiation. Use the 2920 * allocated space. 2921 */ 2922 bp->b_flags &= ~B_KVAALLOC; 2923 KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0")); 2924 bp->b_kvabase = bp->b_kvaalloc; 2925 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize); 2926 goto has_addr; 2927 } 2928 2929 /* 2930 * Calculate the amount of the address space we would reserve 2931 * if the buffer was mapped. 2932 */ 2933 bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize; 2934 offset = blkno * bsize; 2935 maxsize = size + (offset & PAGE_MASK); 2936 maxsize = imax(maxsize, bsize); 2937 2938mapping_loop: 2939 if (allocbufkva(bp, maxsize, gbflags)) { 2940 /* 2941 * Request defragmentation. getnewbuf() returns us the 2942 * allocated space by the scratch buffer KVA. 2943 */ 2944 scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags | 2945 (GB_UNMAPPED | GB_KVAALLOC)); 2946 if (scratch_bp == NULL) { 2947 if ((gbflags & GB_NOWAIT_BD) != 0) { 2948 /* 2949 * XXXKIB: defragmentation cannot 2950 * succeed, not sure what else to do. 2951 */ 2952 panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp); 2953 } 2954 atomic_add_int(&mappingrestarts, 1); 2955 goto mapping_loop; 2956 } 2957 KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0, 2958 ("scratch bp !B_KVAALLOC %p", scratch_bp)); 2959 setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc, 2960 scratch_bp->b_kvasize, gbflags); 2961 2962 /* Get rid of the scratch buffer. */ 2963 scratch_bp->b_kvasize = 0; 2964 scratch_bp->b_flags |= B_INVAL; 2965 scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC); 2966 brelse(scratch_bp); 2967 } 2968 if (!need_mapping) 2969 return; 2970 2971has_addr: 2972 bp->b_saveaddr = bp->b_kvabase; 2973 bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */ 2974 bp->b_flags &= ~B_UNMAPPED; 2975 BUF_CHECK_MAPPED(bp); 2976 bpmap_qenter(bp); 2977} 2978 2979/* 2980 * getblk: 2981 * 2982 * Get a block given a specified block and offset into a file/device. 2983 * The buffers B_DONE bit will be cleared on return, making it almost 2984 * ready for an I/O initiation. B_INVAL may or may not be set on 2985 * return. The caller should clear B_INVAL prior to initiating a 2986 * READ. 2987 * 2988 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2989 * an existing buffer. 2990 * 2991 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 2992 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 2993 * and then cleared based on the backing VM. If the previous buffer is 2994 * non-0-sized but invalid, B_CACHE will be cleared. 2995 * 2996 * If getblk() must create a new buffer, the new buffer is returned with 2997 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 2998 * case it is returned with B_INVAL clear and B_CACHE set based on the 2999 * backing VM. 3000 * 3001 * getblk() also forces a bwrite() for any B_DELWRI buffer whos 3002 * B_CACHE bit is clear. 3003 * 3004 * What this means, basically, is that the caller should use B_CACHE to 3005 * determine whether the buffer is fully valid or not and should clear 3006 * B_INVAL prior to issuing a read. If the caller intends to validate 3007 * the buffer by loading its data area with something, the caller needs 3008 * to clear B_INVAL. If the caller does this without issuing an I/O, 3009 * the caller should set B_CACHE ( as an optimization ), else the caller 3010 * should issue the I/O and biodone() will set B_CACHE if the I/O was 3011 * a write attempt or if it was a successfull read. If the caller 3012 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR 3013 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 3014 */ 3015struct buf * 3016getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo, 3017 int flags) 3018{ 3019 struct buf *bp; 3020 struct bufobj *bo; 3021 int bsize, error, maxsize, vmio; 3022 off_t offset; 3023 3024 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); 3025 KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 3026 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 3027 ASSERT_VOP_LOCKED(vp, "getblk"); 3028 if (size > MAXBSIZE) 3029 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 3030 if (!unmapped_buf_allowed) 3031 flags &= ~(GB_UNMAPPED | GB_KVAALLOC); 3032 3033 bo = &vp->v_bufobj; 3034loop: 3035 BO_RLOCK(bo); 3036 bp = gbincore(bo, blkno); 3037 if (bp != NULL) { 3038 int lockflags; 3039 /* 3040 * Buffer is in-core. If the buffer is not busy nor managed, 3041 * it must be on a queue. 3042 */ 3043 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; 3044 3045 if (flags & GB_LOCK_NOWAIT) 3046 lockflags |= LK_NOWAIT; 3047 3048 error = BUF_TIMELOCK(bp, lockflags, 3049 BO_LOCKPTR(bo), "getblk", slpflag, slptimeo); 3050 3051 /* 3052 * If we slept and got the lock we have to restart in case 3053 * the buffer changed identities. 3054 */ 3055 if (error == ENOLCK) 3056 goto loop; 3057 /* We timed out or were interrupted. */ 3058 else if (error) 3059 return (NULL); 3060 /* If recursed, assume caller knows the rules. */ 3061 else if (BUF_LOCKRECURSED(bp)) 3062 goto end; 3063 3064 /* 3065 * The buffer is locked. B_CACHE is cleared if the buffer is 3066 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set 3067 * and for a VMIO buffer B_CACHE is adjusted according to the 3068 * backing VM cache. 3069 */ 3070 if (bp->b_flags & B_INVAL) 3071 bp->b_flags &= ~B_CACHE; 3072 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0) 3073 bp->b_flags |= B_CACHE; 3074 if (bp->b_flags & B_MANAGED) 3075 MPASS(bp->b_qindex == QUEUE_NONE); 3076 else 3077 bremfree(bp); 3078 3079 /* 3080 * check for size inconsistencies for non-VMIO case. 3081 */ 3082 if (bp->b_bcount != size) { 3083 if ((bp->b_flags & B_VMIO) == 0 || 3084 (size > bp->b_kvasize)) { 3085 if (bp->b_flags & B_DELWRI) { 3086 /* 3087 * If buffer is pinned and caller does 3088 * not want sleep waiting for it to be 3089 * unpinned, bail out 3090 * */ 3091 if (bp->b_pin_count > 0) { 3092 if (flags & GB_LOCK_NOWAIT) { 3093 bqrelse(bp); 3094 return (NULL); 3095 } else { 3096 bunpin_wait(bp); 3097 } 3098 } 3099 bp->b_flags |= B_NOCACHE; 3100 bwrite(bp); 3101 } else { 3102 if (LIST_EMPTY(&bp->b_dep)) { 3103 bp->b_flags |= B_RELBUF; 3104 brelse(bp); 3105 } else { 3106 bp->b_flags |= B_NOCACHE; 3107 bwrite(bp); 3108 } 3109 } 3110 goto loop; 3111 } 3112 } 3113 3114 /* 3115 * Handle the case of unmapped buffer which should 3116 * become mapped, or the buffer for which KVA 3117 * reservation is requested. 3118 */ 3119 bp_unmapped_get_kva(bp, blkno, size, flags); 3120 3121 /* 3122 * If the size is inconsistant in the VMIO case, we can resize 3123 * the buffer. This might lead to B_CACHE getting set or 3124 * cleared. If the size has not changed, B_CACHE remains 3125 * unchanged from its previous state. 3126 */ 3127 if (bp->b_bcount != size) 3128 allocbuf(bp, size); 3129 3130 KASSERT(bp->b_offset != NOOFFSET, 3131 ("getblk: no buffer offset")); 3132 3133 /* 3134 * A buffer with B_DELWRI set and B_CACHE clear must 3135 * be committed before we can return the buffer in 3136 * order to prevent the caller from issuing a read 3137 * ( due to B_CACHE not being set ) and overwriting 3138 * it. 3139 * 3140 * Most callers, including NFS and FFS, need this to 3141 * operate properly either because they assume they 3142 * can issue a read if B_CACHE is not set, or because 3143 * ( for example ) an uncached B_DELWRI might loop due 3144 * to softupdates re-dirtying the buffer. In the latter 3145 * case, B_CACHE is set after the first write completes, 3146 * preventing further loops. 3147 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 3148 * above while extending the buffer, we cannot allow the 3149 * buffer to remain with B_CACHE set after the write 3150 * completes or it will represent a corrupt state. To 3151 * deal with this we set B_NOCACHE to scrap the buffer 3152 * after the write. 3153 * 3154 * We might be able to do something fancy, like setting 3155 * B_CACHE in bwrite() except if B_DELWRI is already set, 3156 * so the below call doesn't set B_CACHE, but that gets real 3157 * confusing. This is much easier. 3158 */ 3159 3160 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 3161 bp->b_flags |= B_NOCACHE; 3162 bwrite(bp); 3163 goto loop; 3164 } 3165 bp->b_flags &= ~B_DONE; 3166 } else { 3167 /* 3168 * Buffer is not in-core, create new buffer. The buffer 3169 * returned by getnewbuf() is locked. Note that the returned 3170 * buffer is also considered valid (not marked B_INVAL). 3171 */ 3172 BO_RUNLOCK(bo); 3173 /* 3174 * If the user does not want us to create the buffer, bail out 3175 * here. 3176 */ 3177 if (flags & GB_NOCREAT) 3178 return NULL; 3179 if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread)) 3180 return NULL; 3181 3182 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize; 3183 offset = blkno * bsize; 3184 vmio = vp->v_object != NULL; 3185 if (vmio) { 3186 maxsize = size + (offset & PAGE_MASK); 3187 } else { 3188 maxsize = size; 3189 /* Do not allow non-VMIO notmapped buffers. */ 3190 flags &= ~GB_UNMAPPED; 3191 } 3192 maxsize = imax(maxsize, bsize); 3193 3194 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags); 3195 if (bp == NULL) { 3196 if (slpflag || slptimeo) 3197 return NULL; 3198 goto loop; 3199 } 3200 3201 /* 3202 * This code is used to make sure that a buffer is not 3203 * created while the getnewbuf routine is blocked. 3204 * This can be a problem whether the vnode is locked or not. 3205 * If the buffer is created out from under us, we have to 3206 * throw away the one we just created. 3207 * 3208 * Note: this must occur before we associate the buffer 3209 * with the vp especially considering limitations in 3210 * the splay tree implementation when dealing with duplicate 3211 * lblkno's. 3212 */ 3213 BO_LOCK(bo); 3214 if (gbincore(bo, blkno)) { 3215 BO_UNLOCK(bo); 3216 bp->b_flags |= B_INVAL; 3217 brelse(bp); 3218 goto loop; 3219 } 3220 3221 /* 3222 * Insert the buffer into the hash, so that it can 3223 * be found by incore. 3224 */ 3225 bp->b_blkno = bp->b_lblkno = blkno; 3226 bp->b_offset = offset; 3227 bgetvp(vp, bp); 3228 BO_UNLOCK(bo); 3229 3230 /* 3231 * set B_VMIO bit. allocbuf() the buffer bigger. Since the 3232 * buffer size starts out as 0, B_CACHE will be set by 3233 * allocbuf() for the VMIO case prior to it testing the 3234 * backing store for validity. 3235 */ 3236 3237 if (vmio) { 3238 bp->b_flags |= B_VMIO; 3239 KASSERT(vp->v_object == bp->b_bufobj->bo_object, 3240 ("ARGH! different b_bufobj->bo_object %p %p %p\n", 3241 bp, vp->v_object, bp->b_bufobj->bo_object)); 3242 } else { 3243 bp->b_flags &= ~B_VMIO; 3244 KASSERT(bp->b_bufobj->bo_object == NULL, 3245 ("ARGH! has b_bufobj->bo_object %p %p\n", 3246 bp, bp->b_bufobj->bo_object)); 3247 BUF_CHECK_MAPPED(bp); 3248 } 3249 3250 allocbuf(bp, size); 3251 bp->b_flags &= ~B_DONE; 3252 } 3253 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp); 3254 BUF_ASSERT_HELD(bp); 3255end: 3256 KASSERT(bp->b_bufobj == bo, 3257 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3258 return (bp); 3259} 3260 3261/* 3262 * Get an empty, disassociated buffer of given size. The buffer is initially 3263 * set to B_INVAL. 3264 */ 3265struct buf * 3266geteblk(int size, int flags) 3267{ 3268 struct buf *bp; 3269 int maxsize; 3270 3271 maxsize = (size + BKVAMASK) & ~BKVAMASK; 3272 while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) { 3273 if ((flags & GB_NOWAIT_BD) && 3274 (curthread->td_pflags & TDP_BUFNEED) != 0) 3275 return (NULL); 3276 } 3277 allocbuf(bp, size); 3278 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 3279 BUF_ASSERT_HELD(bp); 3280 return (bp); 3281} 3282 3283 3284/* 3285 * This code constitutes the buffer memory from either anonymous system 3286 * memory (in the case of non-VMIO operations) or from an associated 3287 * VM object (in the case of VMIO operations). This code is able to 3288 * resize a buffer up or down. 3289 * 3290 * Note that this code is tricky, and has many complications to resolve 3291 * deadlock or inconsistant data situations. Tread lightly!!! 3292 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3293 * the caller. Calling this code willy nilly can result in the loss of data. 3294 * 3295 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3296 * B_CACHE for the non-VMIO case. 3297 */ 3298 3299int 3300allocbuf(struct buf *bp, int size) 3301{ 3302 int newbsize, mbsize; 3303 int i; 3304 3305 BUF_ASSERT_HELD(bp); 3306 3307 if (bp->b_kvasize < size) 3308 panic("allocbuf: buffer too small"); 3309 3310 if ((bp->b_flags & B_VMIO) == 0) { 3311 caddr_t origbuf; 3312 int origbufsize; 3313 /* 3314 * Just get anonymous memory from the kernel. Don't 3315 * mess with B_CACHE. 3316 */ 3317 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3318 if (bp->b_flags & B_MALLOC) 3319 newbsize = mbsize; 3320 else 3321 newbsize = round_page(size); 3322 3323 if (newbsize < bp->b_bufsize) { 3324 /* 3325 * malloced buffers are not shrunk 3326 */ 3327 if (bp->b_flags & B_MALLOC) { 3328 if (newbsize) { 3329 bp->b_bcount = size; 3330 } else { 3331 free(bp->b_data, M_BIOBUF); 3332 if (bp->b_bufsize) { 3333 atomic_subtract_long( 3334 &bufmallocspace, 3335 bp->b_bufsize); 3336 bufspacewakeup(); 3337 bp->b_bufsize = 0; 3338 } 3339 bp->b_saveaddr = bp->b_kvabase; 3340 bp->b_data = bp->b_saveaddr; 3341 bp->b_bcount = 0; 3342 bp->b_flags &= ~B_MALLOC; 3343 } 3344 return 1; 3345 } 3346 vm_hold_free_pages(bp, newbsize); 3347 } else if (newbsize > bp->b_bufsize) { 3348 /* 3349 * We only use malloced memory on the first allocation. 3350 * and revert to page-allocated memory when the buffer 3351 * grows. 3352 */ 3353 /* 3354 * There is a potential smp race here that could lead 3355 * to bufmallocspace slightly passing the max. It 3356 * is probably extremely rare and not worth worrying 3357 * over. 3358 */ 3359 if ( (bufmallocspace < maxbufmallocspace) && 3360 (bp->b_bufsize == 0) && 3361 (mbsize <= PAGE_SIZE/2)) { 3362 3363 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 3364 bp->b_bufsize = mbsize; 3365 bp->b_bcount = size; 3366 bp->b_flags |= B_MALLOC; 3367 atomic_add_long(&bufmallocspace, mbsize); 3368 return 1; 3369 } 3370 origbuf = NULL; 3371 origbufsize = 0; 3372 /* 3373 * If the buffer is growing on its other-than-first allocation, 3374 * then we revert to the page-allocation scheme. 3375 */ 3376 if (bp->b_flags & B_MALLOC) { 3377 origbuf = bp->b_data; 3378 origbufsize = bp->b_bufsize; 3379 bp->b_data = bp->b_kvabase; 3380 if (bp->b_bufsize) { 3381 atomic_subtract_long(&bufmallocspace, 3382 bp->b_bufsize); 3383 bufspacewakeup(); 3384 bp->b_bufsize = 0; 3385 } 3386 bp->b_flags &= ~B_MALLOC; 3387 newbsize = round_page(newbsize); 3388 } 3389 vm_hold_load_pages( 3390 bp, 3391 (vm_offset_t) bp->b_data + bp->b_bufsize, 3392 (vm_offset_t) bp->b_data + newbsize); 3393 if (origbuf) { 3394 bcopy(origbuf, bp->b_data, origbufsize); 3395 free(origbuf, M_BIOBUF); 3396 } 3397 } 3398 } else { 3399 int desiredpages; 3400 3401 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3402 desiredpages = (size == 0) ? 0 : 3403 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 3404 3405 if (bp->b_flags & B_MALLOC) 3406 panic("allocbuf: VMIO buffer can't be malloced"); 3407 /* 3408 * Set B_CACHE initially if buffer is 0 length or will become 3409 * 0-length. 3410 */ 3411 if (size == 0 || bp->b_bufsize == 0) 3412 bp->b_flags |= B_CACHE; 3413 3414 if (newbsize < bp->b_bufsize) { 3415 /* 3416 * DEV_BSIZE aligned new buffer size is less then the 3417 * DEV_BSIZE aligned existing buffer size. Figure out 3418 * if we have to remove any pages. 3419 */ 3420 if (desiredpages < bp->b_npages) { 3421 vm_page_t m; 3422 3423 if ((bp->b_flags & B_UNMAPPED) == 0) { 3424 BUF_CHECK_MAPPED(bp); 3425 pmap_qremove((vm_offset_t)trunc_page( 3426 (vm_offset_t)bp->b_data) + 3427 (desiredpages << PAGE_SHIFT), 3428 (bp->b_npages - desiredpages)); 3429 } else 3430 BUF_CHECK_UNMAPPED(bp); 3431 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 3432 for (i = desiredpages; i < bp->b_npages; i++) { 3433 /* 3434 * the page is not freed here -- it 3435 * is the responsibility of 3436 * vnode_pager_setsize 3437 */ 3438 m = bp->b_pages[i]; 3439 KASSERT(m != bogus_page, 3440 ("allocbuf: bogus page found")); 3441 while (vm_page_sleep_if_busy(m, 3442 "biodep")) 3443 continue; 3444 3445 bp->b_pages[i] = NULL; 3446 vm_page_lock(m); 3447 vm_page_unwire(m, 0); 3448 vm_page_unlock(m); 3449 } 3450 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 3451 bp->b_npages = desiredpages; 3452 } 3453 } else if (size > bp->b_bcount) { 3454 /* 3455 * We are growing the buffer, possibly in a 3456 * byte-granular fashion. 3457 */ 3458 vm_object_t obj; 3459 vm_offset_t toff; 3460 vm_offset_t tinc; 3461 3462 /* 3463 * Step 1, bring in the VM pages from the object, 3464 * allocating them if necessary. We must clear 3465 * B_CACHE if these pages are not valid for the 3466 * range covered by the buffer. 3467 */ 3468 3469 obj = bp->b_bufobj->bo_object; 3470 3471 VM_OBJECT_WLOCK(obj); 3472 while (bp->b_npages < desiredpages) { 3473 vm_page_t m; 3474 3475 /* 3476 * We must allocate system pages since blocking 3477 * here could interfere with paging I/O, no 3478 * matter which process we are. 3479 * 3480 * Only exclusive busy can be tested here. 3481 * Blocking on shared busy might lead to 3482 * deadlocks once allocbuf() is called after 3483 * pages are vfs_busy_pages(). 3484 */ 3485 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + 3486 bp->b_npages, VM_ALLOC_NOBUSY | 3487 VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | 3488 VM_ALLOC_IGN_SBUSY | 3489 VM_ALLOC_COUNT(desiredpages - bp->b_npages)); 3490 if (m->valid == 0) 3491 bp->b_flags &= ~B_CACHE; 3492 bp->b_pages[bp->b_npages] = m; 3493 ++bp->b_npages; 3494 } 3495 3496 /* 3497 * Step 2. We've loaded the pages into the buffer, 3498 * we have to figure out if we can still have B_CACHE 3499 * set. Note that B_CACHE is set according to the 3500 * byte-granular range ( bcount and size ), new the 3501 * aligned range ( newbsize ). 3502 * 3503 * The VM test is against m->valid, which is DEV_BSIZE 3504 * aligned. Needless to say, the validity of the data 3505 * needs to also be DEV_BSIZE aligned. Note that this 3506 * fails with NFS if the server or some other client 3507 * extends the file's EOF. If our buffer is resized, 3508 * B_CACHE may remain set! XXX 3509 */ 3510 3511 toff = bp->b_bcount; 3512 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 3513 3514 while ((bp->b_flags & B_CACHE) && toff < size) { 3515 vm_pindex_t pi; 3516 3517 if (tinc > (size - toff)) 3518 tinc = size - toff; 3519 3520 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 3521 PAGE_SHIFT; 3522 3523 vfs_buf_test_cache( 3524 bp, 3525 bp->b_offset, 3526 toff, 3527 tinc, 3528 bp->b_pages[pi] 3529 ); 3530 toff += tinc; 3531 tinc = PAGE_SIZE; 3532 } 3533 VM_OBJECT_WUNLOCK(obj); 3534 3535 /* 3536 * Step 3, fixup the KVM pmap. 3537 */ 3538 if ((bp->b_flags & B_UNMAPPED) == 0) 3539 bpmap_qenter(bp); 3540 else 3541 BUF_CHECK_UNMAPPED(bp); 3542 } 3543 } 3544 if (newbsize < bp->b_bufsize) 3545 bufspacewakeup(); 3546 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3547 bp->b_bcount = size; /* requested buffer size */ 3548 return 1; 3549} 3550 3551extern int inflight_transient_maps; 3552 3553void 3554biodone(struct bio *bp) 3555{ 3556 struct mtx *mtxp; 3557 void (*done)(struct bio *); 3558 vm_offset_t start, end; 3559 int transient; 3560 3561 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3562 mtx_lock(mtxp); 3563 bp->bio_flags |= BIO_DONE; 3564 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) { 3565 start = trunc_page((vm_offset_t)bp->bio_data); 3566 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length); 3567 transient = 1; 3568 } else { 3569 transient = 0; 3570 start = end = 0; 3571 } 3572 done = bp->bio_done; 3573 if (done == NULL) 3574 wakeup(bp); 3575 mtx_unlock(mtxp); 3576 if (done != NULL) 3577 done(bp); 3578 if (transient) { 3579 pmap_qremove(start, OFF_TO_IDX(end - start)); 3580 vmem_free(transient_arena, start, end - start); 3581 atomic_add_int(&inflight_transient_maps, -1); 3582 } 3583} 3584 3585/* 3586 * Wait for a BIO to finish. 3587 * 3588 * XXX: resort to a timeout for now. The optimal locking (if any) for this 3589 * case is not yet clear. 3590 */ 3591int 3592biowait(struct bio *bp, const char *wchan) 3593{ 3594 struct mtx *mtxp; 3595 3596 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3597 mtx_lock(mtxp); 3598 while ((bp->bio_flags & BIO_DONE) == 0) 3599 msleep(bp, mtxp, PRIBIO, wchan, hz / 10); 3600 mtx_unlock(mtxp); 3601 if (bp->bio_error != 0) 3602 return (bp->bio_error); 3603 if (!(bp->bio_flags & BIO_ERROR)) 3604 return (0); 3605 return (EIO); 3606} 3607 3608void 3609biofinish(struct bio *bp, struct devstat *stat, int error) 3610{ 3611 3612 if (error) { 3613 bp->bio_error = error; 3614 bp->bio_flags |= BIO_ERROR; 3615 } 3616 if (stat != NULL) 3617 devstat_end_transaction_bio(stat, bp); 3618 biodone(bp); 3619} 3620 3621/* 3622 * bufwait: 3623 * 3624 * Wait for buffer I/O completion, returning error status. The buffer 3625 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR 3626 * error and cleared. 3627 */ 3628int 3629bufwait(struct buf *bp) 3630{ 3631 if (bp->b_iocmd == BIO_READ) 3632 bwait(bp, PRIBIO, "biord"); 3633 else 3634 bwait(bp, PRIBIO, "biowr"); 3635 if (bp->b_flags & B_EINTR) { 3636 bp->b_flags &= ~B_EINTR; 3637 return (EINTR); 3638 } 3639 if (bp->b_ioflags & BIO_ERROR) { 3640 return (bp->b_error ? bp->b_error : EIO); 3641 } else { 3642 return (0); 3643 } 3644} 3645 3646 /* 3647 * Call back function from struct bio back up to struct buf. 3648 */ 3649static void 3650bufdonebio(struct bio *bip) 3651{ 3652 struct buf *bp; 3653 3654 bp = bip->bio_caller2; 3655 bp->b_resid = bp->b_bcount - bip->bio_completed; 3656 bp->b_resid = bip->bio_resid; /* XXX: remove */ 3657 bp->b_ioflags = bip->bio_flags; 3658 bp->b_error = bip->bio_error; 3659 if (bp->b_error) 3660 bp->b_ioflags |= BIO_ERROR; 3661 bufdone(bp); 3662 g_destroy_bio(bip); 3663} 3664 3665void 3666dev_strategy(struct cdev *dev, struct buf *bp) 3667{ 3668 struct cdevsw *csw; 3669 int ref; 3670 3671 KASSERT(dev->si_refcount > 0, 3672 ("dev_strategy on un-referenced struct cdev *(%s) %p", 3673 devtoname(dev), dev)); 3674 3675 csw = dev_refthread(dev, &ref); 3676 dev_strategy_csw(dev, csw, bp); 3677 dev_relthread(dev, ref); 3678} 3679 3680void 3681dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp) 3682{ 3683 struct bio *bip; 3684 3685 KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE, 3686 ("b_iocmd botch")); 3687 KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) || 3688 dev->si_threadcount > 0, 3689 ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev), 3690 dev)); 3691 if (csw == NULL) { 3692 bp->b_error = ENXIO; 3693 bp->b_ioflags = BIO_ERROR; 3694 bufdone(bp); 3695 return; 3696 } 3697 for (;;) { 3698 bip = g_new_bio(); 3699 if (bip != NULL) 3700 break; 3701 /* Try again later */ 3702 tsleep(&bp, PRIBIO, "dev_strat", hz/10); 3703 } 3704 bip->bio_cmd = bp->b_iocmd; 3705 bip->bio_offset = bp->b_iooffset; 3706 bip->bio_length = bp->b_bcount; 3707 bip->bio_bcount = bp->b_bcount; /* XXX: remove */ 3708 bdata2bio(bp, bip); 3709 bip->bio_done = bufdonebio; 3710 bip->bio_caller2 = bp; 3711 bip->bio_dev = dev; 3712 (*csw->d_strategy)(bip); 3713} 3714 3715/* 3716 * bufdone: 3717 * 3718 * Finish I/O on a buffer, optionally calling a completion function. 3719 * This is usually called from an interrupt so process blocking is 3720 * not allowed. 3721 * 3722 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 3723 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3724 * assuming B_INVAL is clear. 3725 * 3726 * For the VMIO case, we set B_CACHE if the op was a read and no 3727 * read error occured, or if the op was a write. B_CACHE is never 3728 * set if the buffer is invalid or otherwise uncacheable. 3729 * 3730 * biodone does not mess with B_INVAL, allowing the I/O routine or the 3731 * initiator to leave B_INVAL set to brelse the buffer out of existance 3732 * in the biodone routine. 3733 */ 3734void 3735bufdone(struct buf *bp) 3736{ 3737 struct bufobj *dropobj; 3738 void (*biodone)(struct buf *); 3739 3740 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 3741 dropobj = NULL; 3742 3743 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 3744 BUF_ASSERT_HELD(bp); 3745 3746 runningbufwakeup(bp); 3747 if (bp->b_iocmd == BIO_WRITE) 3748 dropobj = bp->b_bufobj; 3749 /* call optional completion function if requested */ 3750 if (bp->b_iodone != NULL) { 3751 biodone = bp->b_iodone; 3752 bp->b_iodone = NULL; 3753 (*biodone) (bp); 3754 if (dropobj) 3755 bufobj_wdrop(dropobj); 3756 return; 3757 } 3758 3759 bufdone_finish(bp); 3760 3761 if (dropobj) 3762 bufobj_wdrop(dropobj); 3763} 3764 3765void 3766bufdone_finish(struct buf *bp) 3767{ 3768 BUF_ASSERT_HELD(bp); 3769 3770 if (!LIST_EMPTY(&bp->b_dep)) 3771 buf_complete(bp); 3772 3773 if (bp->b_flags & B_VMIO) { 3774 vm_ooffset_t foff; 3775 vm_page_t m; 3776 vm_object_t obj; 3777 struct vnode *vp; 3778 int bogus, i, iosize; 3779 3780 obj = bp->b_bufobj->bo_object; 3781 KASSERT(obj->paging_in_progress >= bp->b_npages, 3782 ("biodone_finish: paging in progress(%d) < b_npages(%d)", 3783 obj->paging_in_progress, bp->b_npages)); 3784 3785 vp = bp->b_vp; 3786 KASSERT(vp->v_holdcnt > 0, 3787 ("biodone_finish: vnode %p has zero hold count", vp)); 3788 KASSERT(vp->v_object != NULL, 3789 ("biodone_finish: vnode %p has no vm_object", vp)); 3790 3791 foff = bp->b_offset; 3792 KASSERT(bp->b_offset != NOOFFSET, 3793 ("biodone_finish: bp %p has no buffer offset", bp)); 3794 3795 /* 3796 * Set B_CACHE if the op was a normal read and no error 3797 * occured. B_CACHE is set for writes in the b*write() 3798 * routines. 3799 */ 3800 iosize = bp->b_bcount - bp->b_resid; 3801 if (bp->b_iocmd == BIO_READ && 3802 !(bp->b_flags & (B_INVAL|B_NOCACHE)) && 3803 !(bp->b_ioflags & BIO_ERROR)) { 3804 bp->b_flags |= B_CACHE; 3805 } 3806 bogus = 0; 3807 VM_OBJECT_WLOCK(obj); 3808 for (i = 0; i < bp->b_npages; i++) { 3809 int bogusflag = 0; 3810 int resid; 3811 3812 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3813 if (resid > iosize) 3814 resid = iosize; 3815 3816 /* 3817 * cleanup bogus pages, restoring the originals 3818 */ 3819 m = bp->b_pages[i]; 3820 if (m == bogus_page) { 3821 bogus = bogusflag = 1; 3822 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3823 if (m == NULL) 3824 panic("biodone: page disappeared!"); 3825 bp->b_pages[i] = m; 3826 } 3827 KASSERT(OFF_TO_IDX(foff) == m->pindex, 3828 ("biodone_finish: foff(%jd)/pindex(%ju) mismatch", 3829 (intmax_t)foff, (uintmax_t)m->pindex)); 3830 3831 /* 3832 * In the write case, the valid and clean bits are 3833 * already changed correctly ( see bdwrite() ), so we 3834 * only need to do this here in the read case. 3835 */ 3836 if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) { 3837 KASSERT((m->dirty & vm_page_bits(foff & 3838 PAGE_MASK, resid)) == 0, ("bufdone_finish:" 3839 " page %p has unexpected dirty bits", m)); 3840 vfs_page_set_valid(bp, foff, m); 3841 } 3842 3843 vm_page_sunbusy(m); 3844 vm_object_pip_subtract(obj, 1); 3845 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3846 iosize -= resid; 3847 } 3848 vm_object_pip_wakeupn(obj, 0); 3849 VM_OBJECT_WUNLOCK(obj); 3850 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 3851 BUF_CHECK_MAPPED(bp); 3852 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3853 bp->b_pages, bp->b_npages); 3854 } 3855 } 3856 3857 /* 3858 * For asynchronous completions, release the buffer now. The brelse 3859 * will do a wakeup there if necessary - so no need to do a wakeup 3860 * here in the async case. The sync case always needs to do a wakeup. 3861 */ 3862 3863 if (bp->b_flags & B_ASYNC) { 3864 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR)) 3865 brelse(bp); 3866 else 3867 bqrelse(bp); 3868 } else 3869 bdone(bp); 3870} 3871 3872/* 3873 * This routine is called in lieu of iodone in the case of 3874 * incomplete I/O. This keeps the busy status for pages 3875 * consistant. 3876 */ 3877void 3878vfs_unbusy_pages(struct buf *bp) 3879{ 3880 int i; 3881 vm_object_t obj; 3882 vm_page_t m; 3883 3884 runningbufwakeup(bp); 3885 if (!(bp->b_flags & B_VMIO)) 3886 return; 3887 3888 obj = bp->b_bufobj->bo_object; 3889 VM_OBJECT_WLOCK(obj); 3890 for (i = 0; i < bp->b_npages; i++) { 3891 m = bp->b_pages[i]; 3892 if (m == bogus_page) { 3893 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 3894 if (!m) 3895 panic("vfs_unbusy_pages: page missing\n"); 3896 bp->b_pages[i] = m; 3897 if ((bp->b_flags & B_UNMAPPED) == 0) { 3898 BUF_CHECK_MAPPED(bp); 3899 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3900 bp->b_pages, bp->b_npages); 3901 } else 3902 BUF_CHECK_UNMAPPED(bp); 3903 } 3904 vm_object_pip_subtract(obj, 1); 3905 vm_page_sunbusy(m); 3906 } 3907 vm_object_pip_wakeupn(obj, 0); 3908 VM_OBJECT_WUNLOCK(obj); 3909} 3910 3911/* 3912 * vfs_page_set_valid: 3913 * 3914 * Set the valid bits in a page based on the supplied offset. The 3915 * range is restricted to the buffer's size. 3916 * 3917 * This routine is typically called after a read completes. 3918 */ 3919static void 3920vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3921{ 3922 vm_ooffset_t eoff; 3923 3924 /* 3925 * Compute the end offset, eoff, such that [off, eoff) does not span a 3926 * page boundary and eoff is not greater than the end of the buffer. 3927 * The end of the buffer, in this case, is our file EOF, not the 3928 * allocation size of the buffer. 3929 */ 3930 eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK; 3931 if (eoff > bp->b_offset + bp->b_bcount) 3932 eoff = bp->b_offset + bp->b_bcount; 3933 3934 /* 3935 * Set valid range. This is typically the entire buffer and thus the 3936 * entire page. 3937 */ 3938 if (eoff > off) 3939 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off); 3940} 3941 3942/* 3943 * vfs_page_set_validclean: 3944 * 3945 * Set the valid bits and clear the dirty bits in a page based on the 3946 * supplied offset. The range is restricted to the buffer's size. 3947 */ 3948static void 3949vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3950{ 3951 vm_ooffset_t soff, eoff; 3952 3953 /* 3954 * Start and end offsets in buffer. eoff - soff may not cross a 3955 * page boundry or cross the end of the buffer. The end of the 3956 * buffer, in this case, is our file EOF, not the allocation size 3957 * of the buffer. 3958 */ 3959 soff = off; 3960 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3961 if (eoff > bp->b_offset + bp->b_bcount) 3962 eoff = bp->b_offset + bp->b_bcount; 3963 3964 /* 3965 * Set valid range. This is typically the entire buffer and thus the 3966 * entire page. 3967 */ 3968 if (eoff > soff) { 3969 vm_page_set_validclean( 3970 m, 3971 (vm_offset_t) (soff & PAGE_MASK), 3972 (vm_offset_t) (eoff - soff) 3973 ); 3974 } 3975} 3976 3977/* 3978 * Ensure that all buffer pages are not exclusive busied. If any page is 3979 * exclusive busy, drain it. 3980 */ 3981void 3982vfs_drain_busy_pages(struct buf *bp) 3983{ 3984 vm_page_t m; 3985 int i, last_busied; 3986 3987 VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object); 3988 last_busied = 0; 3989 for (i = 0; i < bp->b_npages; i++) { 3990 m = bp->b_pages[i]; 3991 if (vm_page_xbusied(m)) { 3992 for (; last_busied < i; last_busied++) 3993 vm_page_sbusy(bp->b_pages[last_busied]); 3994 while (vm_page_xbusied(m)) { 3995 vm_page_lock(m); 3996 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 3997 vm_page_busy_sleep(m, "vbpage"); 3998 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 3999 } 4000 } 4001 } 4002 for (i = 0; i < last_busied; i++) 4003 vm_page_sunbusy(bp->b_pages[i]); 4004} 4005 4006/* 4007 * This routine is called before a device strategy routine. 4008 * It is used to tell the VM system that paging I/O is in 4009 * progress, and treat the pages associated with the buffer 4010 * almost as being exclusive busy. Also the object paging_in_progress 4011 * flag is handled to make sure that the object doesn't become 4012 * inconsistant. 4013 * 4014 * Since I/O has not been initiated yet, certain buffer flags 4015 * such as BIO_ERROR or B_INVAL may be in an inconsistant state 4016 * and should be ignored. 4017 */ 4018void 4019vfs_busy_pages(struct buf *bp, int clear_modify) 4020{ 4021 int i, bogus; 4022 vm_object_t obj; 4023 vm_ooffset_t foff; 4024 vm_page_t m; 4025 4026 if (!(bp->b_flags & B_VMIO)) 4027 return; 4028 4029 obj = bp->b_bufobj->bo_object; 4030 foff = bp->b_offset; 4031 KASSERT(bp->b_offset != NOOFFSET, 4032 ("vfs_busy_pages: no buffer offset")); 4033 VM_OBJECT_WLOCK(obj); 4034 vfs_drain_busy_pages(bp); 4035 if (bp->b_bufsize != 0) 4036 vfs_setdirty_locked_object(bp); 4037 bogus = 0; 4038 for (i = 0; i < bp->b_npages; i++) { 4039 m = bp->b_pages[i]; 4040 4041 if ((bp->b_flags & B_CLUSTER) == 0) { 4042 vm_object_pip_add(obj, 1); 4043 vm_page_sbusy(m); 4044 } 4045 /* 4046 * When readying a buffer for a read ( i.e 4047 * clear_modify == 0 ), it is important to do 4048 * bogus_page replacement for valid pages in 4049 * partially instantiated buffers. Partially 4050 * instantiated buffers can, in turn, occur when 4051 * reconstituting a buffer from its VM backing store 4052 * base. We only have to do this if B_CACHE is 4053 * clear ( which causes the I/O to occur in the 4054 * first place ). The replacement prevents the read 4055 * I/O from overwriting potentially dirty VM-backed 4056 * pages. XXX bogus page replacement is, uh, bogus. 4057 * It may not work properly with small-block devices. 4058 * We need to find a better way. 4059 */ 4060 if (clear_modify) { 4061 pmap_remove_write(m); 4062 vfs_page_set_validclean(bp, foff, m); 4063 } else if (m->valid == VM_PAGE_BITS_ALL && 4064 (bp->b_flags & B_CACHE) == 0) { 4065 bp->b_pages[i] = bogus_page; 4066 bogus++; 4067 } 4068 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 4069 } 4070 VM_OBJECT_WUNLOCK(obj); 4071 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 4072 BUF_CHECK_MAPPED(bp); 4073 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 4074 bp->b_pages, bp->b_npages); 4075 } 4076} 4077 4078/* 4079 * vfs_bio_set_valid: 4080 * 4081 * Set the range within the buffer to valid. The range is 4082 * relative to the beginning of the buffer, b_offset. Note that 4083 * b_offset itself may be offset from the beginning of the first 4084 * page. 4085 */ 4086void 4087vfs_bio_set_valid(struct buf *bp, int base, int size) 4088{ 4089 int i, n; 4090 vm_page_t m; 4091 4092 if (!(bp->b_flags & B_VMIO)) 4093 return; 4094 4095 /* 4096 * Fixup base to be relative to beginning of first page. 4097 * Set initial n to be the maximum number of bytes in the 4098 * first page that can be validated. 4099 */ 4100 base += (bp->b_offset & PAGE_MASK); 4101 n = PAGE_SIZE - (base & PAGE_MASK); 4102 4103 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 4104 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 4105 m = bp->b_pages[i]; 4106 if (n > size) 4107 n = size; 4108 vm_page_set_valid_range(m, base & PAGE_MASK, n); 4109 base += n; 4110 size -= n; 4111 n = PAGE_SIZE; 4112 } 4113 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 4114} 4115 4116/* 4117 * vfs_bio_clrbuf: 4118 * 4119 * If the specified buffer is a non-VMIO buffer, clear the entire 4120 * buffer. If the specified buffer is a VMIO buffer, clear and 4121 * validate only the previously invalid portions of the buffer. 4122 * This routine essentially fakes an I/O, so we need to clear 4123 * BIO_ERROR and B_INVAL. 4124 * 4125 * Note that while we only theoretically need to clear through b_bcount, 4126 * we go ahead and clear through b_bufsize. 4127 */ 4128void 4129vfs_bio_clrbuf(struct buf *bp) 4130{ 4131 int i, j, mask, sa, ea, slide; 4132 4133 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) { 4134 clrbuf(bp); 4135 return; 4136 } 4137 bp->b_flags &= ~B_INVAL; 4138 bp->b_ioflags &= ~BIO_ERROR; 4139 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 4140 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 4141 (bp->b_offset & PAGE_MASK) == 0) { 4142 if (bp->b_pages[0] == bogus_page) 4143 goto unlock; 4144 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 4145 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object); 4146 if ((bp->b_pages[0]->valid & mask) == mask) 4147 goto unlock; 4148 if ((bp->b_pages[0]->valid & mask) == 0) { 4149 pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize); 4150 bp->b_pages[0]->valid |= mask; 4151 goto unlock; 4152 } 4153 } 4154 sa = bp->b_offset & PAGE_MASK; 4155 slide = 0; 4156 for (i = 0; i < bp->b_npages; i++, sa = 0) { 4157 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize); 4158 ea = slide & PAGE_MASK; 4159 if (ea == 0) 4160 ea = PAGE_SIZE; 4161 if (bp->b_pages[i] == bogus_page) 4162 continue; 4163 j = sa / DEV_BSIZE; 4164 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 4165 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object); 4166 if ((bp->b_pages[i]->valid & mask) == mask) 4167 continue; 4168 if ((bp->b_pages[i]->valid & mask) == 0) 4169 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa); 4170 else { 4171 for (; sa < ea; sa += DEV_BSIZE, j++) { 4172 if ((bp->b_pages[i]->valid & (1 << j)) == 0) { 4173 pmap_zero_page_area(bp->b_pages[i], 4174 sa, DEV_BSIZE); 4175 } 4176 } 4177 } 4178 bp->b_pages[i]->valid |= mask; 4179 } 4180unlock: 4181 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 4182 bp->b_resid = 0; 4183} 4184 4185void 4186vfs_bio_bzero_buf(struct buf *bp, int base, int size) 4187{ 4188 vm_page_t m; 4189 int i, n; 4190 4191 if ((bp->b_flags & B_UNMAPPED) == 0) { 4192 BUF_CHECK_MAPPED(bp); 4193 bzero(bp->b_data + base, size); 4194 } else { 4195 BUF_CHECK_UNMAPPED(bp); 4196 n = PAGE_SIZE - (base & PAGE_MASK); 4197 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 4198 m = bp->b_pages[i]; 4199 if (n > size) 4200 n = size; 4201 pmap_zero_page_area(m, base & PAGE_MASK, n); 4202 base += n; 4203 size -= n; 4204 n = PAGE_SIZE; 4205 } 4206 } 4207} 4208 4209/* 4210 * vm_hold_load_pages and vm_hold_free_pages get pages into 4211 * a buffers address space. The pages are anonymous and are 4212 * not associated with a file object. 4213 */ 4214static void 4215vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 4216{ 4217 vm_offset_t pg; 4218 vm_page_t p; 4219 int index; 4220 4221 BUF_CHECK_MAPPED(bp); 4222 4223 to = round_page(to); 4224 from = round_page(from); 4225 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4226 4227 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 4228tryagain: 4229 /* 4230 * note: must allocate system pages since blocking here 4231 * could interfere with paging I/O, no matter which 4232 * process we are. 4233 */ 4234 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | 4235 VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT)); 4236 if (p == NULL) { 4237 VM_WAIT; 4238 goto tryagain; 4239 } 4240 pmap_qenter(pg, &p, 1); 4241 bp->b_pages[index] = p; 4242 } 4243 bp->b_npages = index; 4244} 4245 4246/* Return pages associated with this buf to the vm system */ 4247static void 4248vm_hold_free_pages(struct buf *bp, int newbsize) 4249{ 4250 vm_offset_t from; 4251 vm_page_t p; 4252 int index, newnpages; 4253 4254 BUF_CHECK_MAPPED(bp); 4255 4256 from = round_page((vm_offset_t)bp->b_data + newbsize); 4257 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4258 if (bp->b_npages > newnpages) 4259 pmap_qremove(from, bp->b_npages - newnpages); 4260 for (index = newnpages; index < bp->b_npages; index++) { 4261 p = bp->b_pages[index]; 4262 bp->b_pages[index] = NULL; 4263 if (vm_page_sbusied(p)) 4264 printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n", 4265 (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno); 4266 p->wire_count--; 4267 vm_page_free(p); 4268 atomic_subtract_int(&cnt.v_wire_count, 1); 4269 } 4270 bp->b_npages = newnpages; 4271} 4272 4273/* 4274 * Map an IO request into kernel virtual address space. 4275 * 4276 * All requests are (re)mapped into kernel VA space. 4277 * Notice that we use b_bufsize for the size of the buffer 4278 * to be mapped. b_bcount might be modified by the driver. 4279 * 4280 * Note that even if the caller determines that the address space should 4281 * be valid, a race or a smaller-file mapped into a larger space may 4282 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST 4283 * check the return value. 4284 */ 4285int 4286vmapbuf(struct buf *bp, int mapbuf) 4287{ 4288 caddr_t kva; 4289 vm_prot_t prot; 4290 int pidx; 4291 4292 if (bp->b_bufsize < 0) 4293 return (-1); 4294 prot = VM_PROT_READ; 4295 if (bp->b_iocmd == BIO_READ) 4296 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 4297 if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 4298 (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages, 4299 btoc(MAXPHYS))) < 0) 4300 return (-1); 4301 bp->b_npages = pidx; 4302 if (mapbuf || !unmapped_buf_allowed) { 4303 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx); 4304 kva = bp->b_saveaddr; 4305 bp->b_saveaddr = bp->b_data; 4306 bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK); 4307 bp->b_flags &= ~B_UNMAPPED; 4308 } else { 4309 bp->b_flags |= B_UNMAPPED; 4310 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK; 4311 bp->b_saveaddr = bp->b_data; 4312 bp->b_data = unmapped_buf; 4313 } 4314 return(0); 4315} 4316 4317/* 4318 * Free the io map PTEs associated with this IO operation. 4319 * We also invalidate the TLB entries and restore the original b_addr. 4320 */ 4321void 4322vunmapbuf(struct buf *bp) 4323{ 4324 int npages; 4325 4326 npages = bp->b_npages; 4327 if (bp->b_flags & B_UNMAPPED) 4328 bp->b_flags &= ~B_UNMAPPED; 4329 else 4330 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 4331 vm_page_unhold_pages(bp->b_pages, npages); 4332 4333 bp->b_data = bp->b_saveaddr; 4334} 4335 4336void 4337bdone(struct buf *bp) 4338{ 4339 struct mtx *mtxp; 4340 4341 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4342 mtx_lock(mtxp); 4343 bp->b_flags |= B_DONE; 4344 wakeup(bp); 4345 mtx_unlock(mtxp); 4346} 4347 4348void 4349bwait(struct buf *bp, u_char pri, const char *wchan) 4350{ 4351 struct mtx *mtxp; 4352 4353 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4354 mtx_lock(mtxp); 4355 while ((bp->b_flags & B_DONE) == 0) 4356 msleep(bp, mtxp, pri, wchan, 0); 4357 mtx_unlock(mtxp); 4358} 4359 4360int 4361bufsync(struct bufobj *bo, int waitfor) 4362{ 4363 4364 return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread)); 4365} 4366 4367void 4368bufstrategy(struct bufobj *bo, struct buf *bp) 4369{ 4370 int i = 0; 4371 struct vnode *vp; 4372 4373 vp = bp->b_vp; 4374 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy")); 4375 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, 4376 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); 4377 i = VOP_STRATEGY(vp, bp); 4378 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp)); 4379} 4380 4381void 4382bufobj_wrefl(struct bufobj *bo) 4383{ 4384 4385 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4386 ASSERT_BO_WLOCKED(bo); 4387 bo->bo_numoutput++; 4388} 4389 4390void 4391bufobj_wref(struct bufobj *bo) 4392{ 4393 4394 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4395 BO_LOCK(bo); 4396 bo->bo_numoutput++; 4397 BO_UNLOCK(bo); 4398} 4399 4400void 4401bufobj_wdrop(struct bufobj *bo) 4402{ 4403 4404 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop")); 4405 BO_LOCK(bo); 4406 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count")); 4407 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) { 4408 bo->bo_flag &= ~BO_WWAIT; 4409 wakeup(&bo->bo_numoutput); 4410 } 4411 BO_UNLOCK(bo); 4412} 4413 4414int 4415bufobj_wwait(struct bufobj *bo, int slpflag, int timeo) 4416{ 4417 int error; 4418 4419 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait")); 4420 ASSERT_BO_WLOCKED(bo); 4421 error = 0; 4422 while (bo->bo_numoutput) { 4423 bo->bo_flag |= BO_WWAIT; 4424 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo), 4425 slpflag | (PRIBIO + 1), "bo_wwait", timeo); 4426 if (error) 4427 break; 4428 } 4429 return (error); 4430} 4431 4432void 4433bpin(struct buf *bp) 4434{ 4435 struct mtx *mtxp; 4436 4437 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4438 mtx_lock(mtxp); 4439 bp->b_pin_count++; 4440 mtx_unlock(mtxp); 4441} 4442 4443void 4444bunpin(struct buf *bp) 4445{ 4446 struct mtx *mtxp; 4447 4448 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4449 mtx_lock(mtxp); 4450 if (--bp->b_pin_count == 0) 4451 wakeup(bp); 4452 mtx_unlock(mtxp); 4453} 4454 4455void 4456bunpin_wait(struct buf *bp) 4457{ 4458 struct mtx *mtxp; 4459 4460 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4461 mtx_lock(mtxp); 4462 while (bp->b_pin_count > 0) 4463 msleep(bp, mtxp, PRIBIO, "bwunpin", 0); 4464 mtx_unlock(mtxp); 4465} 4466 4467/* 4468 * Set bio_data or bio_ma for struct bio from the struct buf. 4469 */ 4470void 4471bdata2bio(struct buf *bp, struct bio *bip) 4472{ 4473 4474 if ((bp->b_flags & B_UNMAPPED) != 0) { 4475 KASSERT(unmapped_buf_allowed, ("unmapped")); 4476 bip->bio_ma = bp->b_pages; 4477 bip->bio_ma_n = bp->b_npages; 4478 bip->bio_data = unmapped_buf; 4479 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK; 4480 bip->bio_flags |= BIO_UNMAPPED; 4481 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) / 4482 PAGE_SIZE == bp->b_npages, 4483 ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset, 4484 (long long)bip->bio_length, bip->bio_ma_n)); 4485 } else { 4486 bip->bio_data = bp->b_data; 4487 bip->bio_ma = NULL; 4488 } 4489} 4490 4491#include "opt_ddb.h" 4492#ifdef DDB 4493#include <ddb/ddb.h> 4494 4495/* DDB command to show buffer data */ 4496DB_SHOW_COMMAND(buffer, db_show_buffer) 4497{ 4498 /* get args */ 4499 struct buf *bp = (struct buf *)addr; 4500 4501 if (!have_addr) { 4502 db_printf("usage: show buffer <addr>\n"); 4503 return; 4504 } 4505 4506 db_printf("buf at %p\n", bp); 4507 db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n", 4508 (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags, 4509 PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS); 4510 db_printf( 4511 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n" 4512 "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, " 4513 "b_dep = %p\n", 4514 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4515 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno, 4516 (intmax_t)bp->b_lblkno, bp->b_dep.lh_first); 4517 if (bp->b_npages) { 4518 int i; 4519 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 4520 for (i = 0; i < bp->b_npages; i++) { 4521 vm_page_t m; 4522 m = bp->b_pages[i]; 4523 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4524 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4525 if ((i + 1) < bp->b_npages) 4526 db_printf(","); 4527 } 4528 db_printf("\n"); 4529 } 4530 db_printf(" "); 4531 BUF_LOCKPRINTINFO(bp); 4532} 4533 4534DB_SHOW_COMMAND(lockedbufs, lockedbufs) 4535{ 4536 struct buf *bp; 4537 int i; 4538 4539 for (i = 0; i < nbuf; i++) { 4540 bp = &buf[i]; 4541 if (BUF_ISLOCKED(bp)) { 4542 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4543 db_printf("\n"); 4544 } 4545 } 4546} 4547 4548DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs) 4549{ 4550 struct vnode *vp; 4551 struct buf *bp; 4552 4553 if (!have_addr) { 4554 db_printf("usage: show vnodebufs <addr>\n"); 4555 return; 4556 } 4557 vp = (struct vnode *)addr; 4558 db_printf("Clean buffers:\n"); 4559 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { 4560 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4561 db_printf("\n"); 4562 } 4563 db_printf("Dirty buffers:\n"); 4564 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { 4565 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4566 db_printf("\n"); 4567 } 4568} 4569 4570DB_COMMAND(countfreebufs, db_coundfreebufs) 4571{ 4572 struct buf *bp; 4573 int i, used = 0, nfree = 0; 4574 4575 if (have_addr) { 4576 db_printf("usage: countfreebufs\n"); 4577 return; 4578 } 4579 4580 for (i = 0; i < nbuf; i++) { 4581 bp = &buf[i]; 4582 if ((bp->b_flags & B_INFREECNT) != 0) 4583 nfree++; 4584 else 4585 used++; 4586 } 4587 4588 db_printf("Counted %d free, %d used (%d tot)\n", nfree, used, 4589 nfree + used); 4590 db_printf("numfreebuffers is %d\n", numfreebuffers); 4591} 4592#endif /* DDB */ 4593