vfs_bio.c revision 190331
1/*- 2 * Copyright (c) 2004 Poul-Henning Kamp 3 * Copyright (c) 1994,1997 John S. Dyson 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28/* 29 * this file contains a new buffer I/O scheme implementing a coherent 30 * VM object and buffer cache scheme. Pains have been taken to make 31 * sure that the performance degradation associated with schemes such 32 * as this is not realized. 33 * 34 * Author: John S. Dyson 35 * Significant help during the development and debugging phases 36 * had been provided by David Greenman, also of the FreeBSD core team. 37 * 38 * see man buf(9) for more info. 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 190331 2009-03-23 20:18:06Z jhb $"); 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/bio.h> 47#include <sys/conf.h> 48#include <sys/buf.h> 49#include <sys/devicestat.h> 50#include <sys/eventhandler.h> 51#include <sys/limits.h> 52#include <sys/lock.h> 53#include <sys/malloc.h> 54#include <sys/mount.h> 55#include <sys/mutex.h> 56#include <sys/kernel.h> 57#include <sys/kthread.h> 58#include <sys/proc.h> 59#include <sys/resourcevar.h> 60#include <sys/sysctl.h> 61#include <sys/vmmeter.h> 62#include <sys/vnode.h> 63#include <geom/geom.h> 64#include <vm/vm.h> 65#include <vm/vm_param.h> 66#include <vm/vm_kern.h> 67#include <vm/vm_pageout.h> 68#include <vm/vm_page.h> 69#include <vm/vm_object.h> 70#include <vm/vm_extern.h> 71#include <vm/vm_map.h> 72#include "opt_compat.h" 73#include "opt_directio.h" 74#include "opt_swap.h" 75 76static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer"); 77 78struct bio_ops bioops; /* I/O operation notification */ 79 80struct buf_ops buf_ops_bio = { 81 .bop_name = "buf_ops_bio", 82 .bop_write = bufwrite, 83 .bop_strategy = bufstrategy, 84 .bop_sync = bufsync, 85 .bop_bdflush = bufbdflush, 86}; 87 88/* 89 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has 90 * carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c. 91 */ 92struct buf *buf; /* buffer header pool */ 93 94static struct proc *bufdaemonproc; 95 96static int inmem(struct vnode *vp, daddr_t blkno); 97static void vm_hold_free_pages(struct buf *bp, vm_offset_t from, 98 vm_offset_t to); 99static void vm_hold_load_pages(struct buf *bp, vm_offset_t from, 100 vm_offset_t to); 101static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, 102 vm_page_t m); 103static void vfs_clean_pages(struct buf *bp); 104static void vfs_setdirty(struct buf *bp); 105static void vfs_setdirty_locked_object(struct buf *bp); 106static void vfs_vmio_release(struct buf *bp); 107static int vfs_bio_clcheck(struct vnode *vp, int size, 108 daddr_t lblkno, daddr_t blkno); 109static int buf_do_flush(struct vnode *vp); 110static int flushbufqueues(struct vnode *, int, int); 111static void buf_daemon(void); 112static void bremfreel(struct buf *bp); 113#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 114 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 115static int sysctl_bufspace(SYSCTL_HANDLER_ARGS); 116#endif 117 118int vmiodirenable = TRUE; 119SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0, 120 "Use the VM system for directory writes"); 121long runningbufspace; 122SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, 123 "Amount of presently outstanding async buffer io"); 124static long bufspace; 125#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 126 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 127SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD, 128 &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers"); 129#else 130SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0, 131 "Virtual memory used for buffers"); 132#endif 133static long maxbufspace; 134SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0, 135 "Maximum allowed value of bufspace (including buf_daemon)"); 136static long bufmallocspace; 137SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0, 138 "Amount of malloced memory for buffers"); 139static long maxbufmallocspace; 140SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0, 141 "Maximum amount of malloced memory for buffers"); 142static long lobufspace; 143SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0, 144 "Minimum amount of buffers we want to have"); 145long hibufspace; 146SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0, 147 "Maximum allowed value of bufspace (excluding buf_daemon)"); 148static int bufreusecnt; 149SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0, 150 "Number of times we have reused a buffer"); 151static int buffreekvacnt; 152SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0, 153 "Number of times we have freed the KVA space from some buffer"); 154static int bufdefragcnt; 155SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0, 156 "Number of times we have had to repeat buffer allocation to defragment"); 157static long lorunningspace; 158SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0, 159 "Minimum preferred space used for in-progress I/O"); 160static long hirunningspace; 161SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0, 162 "Maximum amount of space to use for in-progress I/O"); 163int dirtybufferflushes; 164SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes, 165 0, "Number of bdwrite to bawrite conversions to limit dirty buffers"); 166int bdwriteskip; 167SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip, 168 0, "Number of buffers supplied to bdwrite with snapshot deadlock risk"); 169int altbufferflushes; 170SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes, 171 0, "Number of fsync flushes to limit dirty buffers"); 172static int recursiveflushes; 173SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes, 174 0, "Number of flushes skipped due to being recursive"); 175static int numdirtybuffers; 176SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0, 177 "Number of buffers that are dirty (has unwritten changes) at the moment"); 178static int lodirtybuffers; 179SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0, 180 "How many buffers we want to have free before bufdaemon can sleep"); 181static int hidirtybuffers; 182SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0, 183 "When the number of dirty buffers is considered severe"); 184int dirtybufthresh; 185SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh, 186 0, "Number of bdwrite to bawrite conversions to clear dirty buffers"); 187static int numfreebuffers; 188SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0, 189 "Number of free buffers"); 190static int lofreebuffers; 191SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0, 192 "XXX Unused"); 193static int hifreebuffers; 194SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0, 195 "XXX Complicatedly unused"); 196static int getnewbufcalls; 197SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0, 198 "Number of calls to getnewbuf"); 199static int getnewbufrestarts; 200SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0, 201 "Number of times getnewbuf has had to restart a buffer aquisition"); 202static int flushbufqtarget = 100; 203SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0, 204 "Amount of work to do in flushbufqueues when helping bufdaemon"); 205 206/* 207 * Wakeup point for bufdaemon, as well as indicator of whether it is already 208 * active. Set to 1 when the bufdaemon is already "on" the queue, 0 when it 209 * is idling. 210 */ 211static int bd_request; 212 213/* 214 * This lock synchronizes access to bd_request. 215 */ 216static struct mtx bdlock; 217 218/* 219 * bogus page -- for I/O to/from partially complete buffers 220 * this is a temporary solution to the problem, but it is not 221 * really that bad. it would be better to split the buffer 222 * for input in the case of buffers partially already in memory, 223 * but the code is intricate enough already. 224 */ 225vm_page_t bogus_page; 226 227/* 228 * Synchronization (sleep/wakeup) variable for active buffer space requests. 229 * Set when wait starts, cleared prior to wakeup(). 230 * Used in runningbufwakeup() and waitrunningbufspace(). 231 */ 232static int runningbufreq; 233 234/* 235 * This lock protects the runningbufreq and synchronizes runningbufwakeup and 236 * waitrunningbufspace(). 237 */ 238static struct mtx rbreqlock; 239 240/* 241 * Synchronization (sleep/wakeup) variable for buffer requests. 242 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done 243 * by and/or. 244 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(), 245 * getnewbuf(), and getblk(). 246 */ 247static int needsbuffer; 248 249/* 250 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it. 251 */ 252static struct mtx nblock; 253 254/* 255 * Definitions for the buffer free lists. 256 */ 257#define BUFFER_QUEUES 6 /* number of free buffer queues */ 258 259#define QUEUE_NONE 0 /* on no queue */ 260#define QUEUE_CLEAN 1 /* non-B_DELWRI buffers */ 261#define QUEUE_DIRTY 2 /* B_DELWRI buffers */ 262#define QUEUE_DIRTY_GIANT 3 /* B_DELWRI buffers that need giant */ 263#define QUEUE_EMPTYKVA 4 /* empty buffer headers w/KVA assignment */ 264#define QUEUE_EMPTY 5 /* empty buffer headers */ 265#define QUEUE_SENTINEL 1024 /* not an queue index, but mark for sentinel */ 266 267/* Queues for free buffers with various properties */ 268static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } }; 269 270/* Lock for the bufqueues */ 271static struct mtx bqlock; 272 273/* 274 * Single global constant for BUF_WMESG, to avoid getting multiple references. 275 * buf_wmesg is referred from macros. 276 */ 277const char *buf_wmesg = BUF_WMESG; 278 279#define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 280#define VFS_BIO_NEED_DIRTYFLUSH 0x02 /* waiting for dirty buffer flush */ 281#define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */ 282#define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 283 284#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 285 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 286static int 287sysctl_bufspace(SYSCTL_HANDLER_ARGS) 288{ 289 long lvalue; 290 int ivalue; 291 292 if (sizeof(int) == sizeof(long) || req->oldlen == sizeof(long)) 293 return (sysctl_handle_long(oidp, arg1, arg2, req)); 294 lvalue = *(long *)arg1; 295 if (lvalue > INT_MAX) 296 /* On overflow, still write out a long to trigger ENOMEM. */ 297 return (sysctl_handle_long(oidp, &lvalue, 0, req)); 298 ivalue = lvalue; 299 return (sysctl_handle_int(oidp, &ivalue, 0, req)); 300} 301#endif 302 303#ifdef DIRECTIO 304extern void ffs_rawread_setup(void); 305#endif /* DIRECTIO */ 306/* 307 * numdirtywakeup: 308 * 309 * If someone is blocked due to there being too many dirty buffers, 310 * and numdirtybuffers is now reasonable, wake them up. 311 */ 312 313static __inline void 314numdirtywakeup(int level) 315{ 316 317 if (numdirtybuffers <= level) { 318 mtx_lock(&nblock); 319 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) { 320 needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH; 321 wakeup(&needsbuffer); 322 } 323 mtx_unlock(&nblock); 324 } 325} 326 327/* 328 * bufspacewakeup: 329 * 330 * Called when buffer space is potentially available for recovery. 331 * getnewbuf() will block on this flag when it is unable to free 332 * sufficient buffer space. Buffer space becomes recoverable when 333 * bp's get placed back in the queues. 334 */ 335 336static __inline void 337bufspacewakeup(void) 338{ 339 340 /* 341 * If someone is waiting for BUF space, wake them up. Even 342 * though we haven't freed the kva space yet, the waiting 343 * process will be able to now. 344 */ 345 mtx_lock(&nblock); 346 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 347 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 348 wakeup(&needsbuffer); 349 } 350 mtx_unlock(&nblock); 351} 352 353/* 354 * runningbufwakeup() - in-progress I/O accounting. 355 * 356 */ 357void 358runningbufwakeup(struct buf *bp) 359{ 360 361 if (bp->b_runningbufspace) { 362 atomic_subtract_long(&runningbufspace, bp->b_runningbufspace); 363 bp->b_runningbufspace = 0; 364 mtx_lock(&rbreqlock); 365 if (runningbufreq && runningbufspace <= lorunningspace) { 366 runningbufreq = 0; 367 wakeup(&runningbufreq); 368 } 369 mtx_unlock(&rbreqlock); 370 } 371} 372 373/* 374 * bufcountwakeup: 375 * 376 * Called when a buffer has been added to one of the free queues to 377 * account for the buffer and to wakeup anyone waiting for free buffers. 378 * This typically occurs when large amounts of metadata are being handled 379 * by the buffer cache ( else buffer space runs out first, usually ). 380 */ 381 382static __inline void 383bufcountwakeup(void) 384{ 385 386 atomic_add_int(&numfreebuffers, 1); 387 mtx_lock(&nblock); 388 if (needsbuffer) { 389 needsbuffer &= ~VFS_BIO_NEED_ANY; 390 if (numfreebuffers >= hifreebuffers) 391 needsbuffer &= ~VFS_BIO_NEED_FREE; 392 wakeup(&needsbuffer); 393 } 394 mtx_unlock(&nblock); 395} 396 397/* 398 * waitrunningbufspace() 399 * 400 * runningbufspace is a measure of the amount of I/O currently 401 * running. This routine is used in async-write situations to 402 * prevent creating huge backups of pending writes to a device. 403 * Only asynchronous writes are governed by this function. 404 * 405 * Reads will adjust runningbufspace, but will not block based on it. 406 * The read load has a side effect of reducing the allowed write load. 407 * 408 * This does NOT turn an async write into a sync write. It waits 409 * for earlier writes to complete and generally returns before the 410 * caller's write has reached the device. 411 */ 412void 413waitrunningbufspace(void) 414{ 415 416 mtx_lock(&rbreqlock); 417 while (runningbufspace > hirunningspace) { 418 ++runningbufreq; 419 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0); 420 } 421 mtx_unlock(&rbreqlock); 422} 423 424 425/* 426 * vfs_buf_test_cache: 427 * 428 * Called when a buffer is extended. This function clears the B_CACHE 429 * bit if the newly extended portion of the buffer does not contain 430 * valid data. 431 */ 432static __inline 433void 434vfs_buf_test_cache(struct buf *bp, 435 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 436 vm_page_t m) 437{ 438 439 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 440 if (bp->b_flags & B_CACHE) { 441 int base = (foff + off) & PAGE_MASK; 442 if (vm_page_is_valid(m, base, size) == 0) 443 bp->b_flags &= ~B_CACHE; 444 } 445} 446 447/* Wake up the buffer daemon if necessary */ 448static __inline 449void 450bd_wakeup(int dirtybuflevel) 451{ 452 453 mtx_lock(&bdlock); 454 if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) { 455 bd_request = 1; 456 wakeup(&bd_request); 457 } 458 mtx_unlock(&bdlock); 459} 460 461/* 462 * bd_speedup - speedup the buffer cache flushing code 463 */ 464 465static __inline 466void 467bd_speedup(void) 468{ 469 470 bd_wakeup(1); 471} 472 473/* 474 * Calculating buffer cache scaling values and reserve space for buffer 475 * headers. This is called during low level kernel initialization and 476 * may be called more then once. We CANNOT write to the memory area 477 * being reserved at this time. 478 */ 479caddr_t 480kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est) 481{ 482 int tuned_nbuf; 483 long maxbuf; 484 485 /* 486 * physmem_est is in pages. Convert it to kilobytes (assumes 487 * PAGE_SIZE is >= 1K) 488 */ 489 physmem_est = physmem_est * (PAGE_SIZE / 1024); 490 491 /* 492 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 493 * For the first 64MB of ram nominally allocate sufficient buffers to 494 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 495 * buffers to cover 1/10 of our ram over 64MB. When auto-sizing 496 * the buffer cache we limit the eventual kva reservation to 497 * maxbcache bytes. 498 * 499 * factor represents the 1/4 x ram conversion. 500 */ 501 if (nbuf == 0) { 502 int factor = 4 * BKVASIZE / 1024; 503 504 nbuf = 50; 505 if (physmem_est > 4096) 506 nbuf += min((physmem_est - 4096) / factor, 507 65536 / factor); 508 if (physmem_est > 65536) 509 nbuf += (physmem_est - 65536) * 2 / (factor * 5); 510 511 if (maxbcache && nbuf > maxbcache / BKVASIZE) 512 nbuf = maxbcache / BKVASIZE; 513 tuned_nbuf = 1; 514 } else 515 tuned_nbuf = 0; 516 517 /* XXX Avoid unsigned long overflows later on with maxbufspace. */ 518 maxbuf = (LONG_MAX / 3) / BKVASIZE; 519 if (nbuf > maxbuf) { 520 if (!tuned_nbuf) 521 printf("Warning: nbufs lowered from %d to %ld\n", nbuf, 522 maxbuf); 523 nbuf = maxbuf; 524 } 525 526 /* 527 * swbufs are used as temporary holders for I/O, such as paging I/O. 528 * We have no less then 16 and no more then 256. 529 */ 530 nswbuf = max(min(nbuf/4, 256), 16); 531#ifdef NSWBUF_MIN 532 if (nswbuf < NSWBUF_MIN) 533 nswbuf = NSWBUF_MIN; 534#endif 535#ifdef DIRECTIO 536 ffs_rawread_setup(); 537#endif 538 539 /* 540 * Reserve space for the buffer cache buffers 541 */ 542 swbuf = (void *)v; 543 v = (caddr_t)(swbuf + nswbuf); 544 buf = (void *)v; 545 v = (caddr_t)(buf + nbuf); 546 547 return(v); 548} 549 550/* Initialize the buffer subsystem. Called before use of any buffers. */ 551void 552bufinit(void) 553{ 554 struct buf *bp; 555 int i; 556 557 mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF); 558 mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF); 559 mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF); 560 mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF); 561 562 /* next, make a null set of free lists */ 563 for (i = 0; i < BUFFER_QUEUES; i++) 564 TAILQ_INIT(&bufqueues[i]); 565 566 /* finally, initialize each buffer header and stick on empty q */ 567 for (i = 0; i < nbuf; i++) { 568 bp = &buf[i]; 569 bzero(bp, sizeof *bp); 570 bp->b_flags = B_INVAL; /* we're just an empty header */ 571 bp->b_rcred = NOCRED; 572 bp->b_wcred = NOCRED; 573 bp->b_qindex = QUEUE_EMPTY; 574 bp->b_vflags = 0; 575 bp->b_xflags = 0; 576 LIST_INIT(&bp->b_dep); 577 BUF_LOCKINIT(bp); 578 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 579 } 580 581 /* 582 * maxbufspace is the absolute maximum amount of buffer space we are 583 * allowed to reserve in KVM and in real terms. The absolute maximum 584 * is nominally used by buf_daemon. hibufspace is the nominal maximum 585 * used by most other processes. The differential is required to 586 * ensure that buf_daemon is able to run when other processes might 587 * be blocked waiting for buffer space. 588 * 589 * maxbufspace is based on BKVASIZE. Allocating buffers larger then 590 * this may result in KVM fragmentation which is not handled optimally 591 * by the system. 592 */ 593 maxbufspace = (long)nbuf * BKVASIZE; 594 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 595 lobufspace = hibufspace - MAXBSIZE; 596 597 lorunningspace = 512 * 1024; 598 hirunningspace = 1024 * 1024; 599 600/* 601 * Limit the amount of malloc memory since it is wired permanently into 602 * the kernel space. Even though this is accounted for in the buffer 603 * allocation, we don't want the malloced region to grow uncontrolled. 604 * The malloc scheme improves memory utilization significantly on average 605 * (small) directories. 606 */ 607 maxbufmallocspace = hibufspace / 20; 608 609/* 610 * Reduce the chance of a deadlock occuring by limiting the number 611 * of delayed-write dirty buffers we allow to stack up. 612 */ 613 hidirtybuffers = nbuf / 4 + 20; 614 dirtybufthresh = hidirtybuffers * 9 / 10; 615 numdirtybuffers = 0; 616/* 617 * To support extreme low-memory systems, make sure hidirtybuffers cannot 618 * eat up all available buffer space. This occurs when our minimum cannot 619 * be met. We try to size hidirtybuffers to 3/4 our buffer space assuming 620 * BKVASIZE'd (8K) buffers. 621 */ 622 while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) { 623 hidirtybuffers >>= 1; 624 } 625 lodirtybuffers = hidirtybuffers / 2; 626 627/* 628 * Try to keep the number of free buffers in the specified range, 629 * and give special processes (e.g. like buf_daemon) access to an 630 * emergency reserve. 631 */ 632 lofreebuffers = nbuf / 18 + 5; 633 hifreebuffers = 2 * lofreebuffers; 634 numfreebuffers = nbuf; 635 636/* 637 * Maximum number of async ops initiated per buf_daemon loop. This is 638 * somewhat of a hack at the moment, we really need to limit ourselves 639 * based on the number of bytes of I/O in-transit that were initiated 640 * from buf_daemon. 641 */ 642 643 bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | 644 VM_ALLOC_NORMAL | VM_ALLOC_WIRED); 645} 646 647/* 648 * bfreekva() - free the kva allocation for a buffer. 649 * 650 * Since this call frees up buffer space, we call bufspacewakeup(). 651 */ 652static void 653bfreekva(struct buf *bp) 654{ 655 656 if (bp->b_kvasize) { 657 atomic_add_int(&buffreekvacnt, 1); 658 atomic_subtract_long(&bufspace, bp->b_kvasize); 659 vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase, 660 (vm_offset_t) bp->b_kvabase + bp->b_kvasize); 661 bp->b_kvasize = 0; 662 bufspacewakeup(); 663 } 664} 665 666/* 667 * bremfree: 668 * 669 * Mark the buffer for removal from the appropriate free list in brelse. 670 * 671 */ 672void 673bremfree(struct buf *bp) 674{ 675 676 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 677 KASSERT((bp->b_flags & B_REMFREE) == 0, 678 ("bremfree: buffer %p already marked for delayed removal.", bp)); 679 KASSERT(bp->b_qindex != QUEUE_NONE, 680 ("bremfree: buffer %p not on a queue.", bp)); 681 BUF_ASSERT_HELD(bp); 682 683 bp->b_flags |= B_REMFREE; 684 /* Fixup numfreebuffers count. */ 685 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) 686 atomic_subtract_int(&numfreebuffers, 1); 687} 688 689/* 690 * bremfreef: 691 * 692 * Force an immediate removal from a free list. Used only in nfs when 693 * it abuses the b_freelist pointer. 694 */ 695void 696bremfreef(struct buf *bp) 697{ 698 mtx_lock(&bqlock); 699 bremfreel(bp); 700 mtx_unlock(&bqlock); 701} 702 703/* 704 * bremfreel: 705 * 706 * Removes a buffer from the free list, must be called with the 707 * bqlock held. 708 */ 709static void 710bremfreel(struct buf *bp) 711{ 712 CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X", 713 bp, bp->b_vp, bp->b_flags); 714 KASSERT(bp->b_qindex != QUEUE_NONE, 715 ("bremfreel: buffer %p not on a queue.", bp)); 716 BUF_ASSERT_HELD(bp); 717 mtx_assert(&bqlock, MA_OWNED); 718 719 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 720 bp->b_qindex = QUEUE_NONE; 721 /* 722 * If this was a delayed bremfree() we only need to remove the buffer 723 * from the queue and return the stats are already done. 724 */ 725 if (bp->b_flags & B_REMFREE) { 726 bp->b_flags &= ~B_REMFREE; 727 return; 728 } 729 /* 730 * Fixup numfreebuffers count. If the buffer is invalid or not 731 * delayed-write, the buffer was free and we must decrement 732 * numfreebuffers. 733 */ 734 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) 735 atomic_subtract_int(&numfreebuffers, 1); 736} 737 738 739/* 740 * Get a buffer with the specified data. Look in the cache first. We 741 * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE 742 * is set, the buffer is valid and we do not have to do anything ( see 743 * getblk() ). This is really just a special case of breadn(). 744 */ 745int 746bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 747 struct buf **bpp) 748{ 749 750 return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp)); 751} 752 753/* 754 * Attempt to initiate asynchronous I/O on read-ahead blocks. We must 755 * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set, 756 * the buffer is valid and we do not have to do anything. 757 */ 758void 759breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, 760 int cnt, struct ucred * cred) 761{ 762 struct buf *rabp; 763 int i; 764 765 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 766 if (inmem(vp, *rablkno)) 767 continue; 768 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0); 769 770 if ((rabp->b_flags & B_CACHE) == 0) { 771 if (!TD_IS_IDLETHREAD(curthread)) 772 curthread->td_ru.ru_inblock++; 773 rabp->b_flags |= B_ASYNC; 774 rabp->b_flags &= ~B_INVAL; 775 rabp->b_ioflags &= ~BIO_ERROR; 776 rabp->b_iocmd = BIO_READ; 777 if (rabp->b_rcred == NOCRED && cred != NOCRED) 778 rabp->b_rcred = crhold(cred); 779 vfs_busy_pages(rabp, 0); 780 BUF_KERNPROC(rabp); 781 rabp->b_iooffset = dbtob(rabp->b_blkno); 782 bstrategy(rabp); 783 } else { 784 brelse(rabp); 785 } 786 } 787} 788 789/* 790 * Operates like bread, but also starts asynchronous I/O on 791 * read-ahead blocks. 792 */ 793int 794breadn(struct vnode * vp, daddr_t blkno, int size, 795 daddr_t * rablkno, int *rabsize, 796 int cnt, struct ucred * cred, struct buf **bpp) 797{ 798 struct buf *bp; 799 int rv = 0, readwait = 0; 800 801 CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size); 802 *bpp = bp = getblk(vp, blkno, size, 0, 0, 0); 803 804 /* if not found in cache, do some I/O */ 805 if ((bp->b_flags & B_CACHE) == 0) { 806 if (!TD_IS_IDLETHREAD(curthread)) 807 curthread->td_ru.ru_inblock++; 808 bp->b_iocmd = BIO_READ; 809 bp->b_flags &= ~B_INVAL; 810 bp->b_ioflags &= ~BIO_ERROR; 811 if (bp->b_rcred == NOCRED && cred != NOCRED) 812 bp->b_rcred = crhold(cred); 813 vfs_busy_pages(bp, 0); 814 bp->b_iooffset = dbtob(bp->b_blkno); 815 bstrategy(bp); 816 ++readwait; 817 } 818 819 breada(vp, rablkno, rabsize, cnt, cred); 820 821 if (readwait) { 822 rv = bufwait(bp); 823 } 824 return (rv); 825} 826 827/* 828 * Write, release buffer on completion. (Done by iodone 829 * if async). Do not bother writing anything if the buffer 830 * is invalid. 831 * 832 * Note that we set B_CACHE here, indicating that buffer is 833 * fully valid and thus cacheable. This is true even of NFS 834 * now so we set it generally. This could be set either here 835 * or in biodone() since the I/O is synchronous. We put it 836 * here. 837 */ 838int 839bufwrite(struct buf *bp) 840{ 841 int oldflags; 842 struct vnode *vp; 843 int vp_md; 844 845 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 846 if (bp->b_flags & B_INVAL) { 847 brelse(bp); 848 return (0); 849 } 850 851 oldflags = bp->b_flags; 852 853 BUF_ASSERT_HELD(bp); 854 855 if (bp->b_pin_count > 0) 856 bunpin_wait(bp); 857 858 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG), 859 ("FFS background buffer should not get here %p", bp)); 860 861 vp = bp->b_vp; 862 if (vp) 863 vp_md = vp->v_vflag & VV_MD; 864 else 865 vp_md = 0; 866 867 /* Mark the buffer clean */ 868 bundirty(bp); 869 870 bp->b_flags &= ~B_DONE; 871 bp->b_ioflags &= ~BIO_ERROR; 872 bp->b_flags |= B_CACHE; 873 bp->b_iocmd = BIO_WRITE; 874 875 bufobj_wref(bp->b_bufobj); 876 vfs_busy_pages(bp, 1); 877 878 /* 879 * Normal bwrites pipeline writes 880 */ 881 bp->b_runningbufspace = bp->b_bufsize; 882 atomic_add_long(&runningbufspace, bp->b_runningbufspace); 883 884 if (!TD_IS_IDLETHREAD(curthread)) 885 curthread->td_ru.ru_oublock++; 886 if (oldflags & B_ASYNC) 887 BUF_KERNPROC(bp); 888 bp->b_iooffset = dbtob(bp->b_blkno); 889 bstrategy(bp); 890 891 if ((oldflags & B_ASYNC) == 0) { 892 int rtval = bufwait(bp); 893 brelse(bp); 894 return (rtval); 895 } else { 896 /* 897 * don't allow the async write to saturate the I/O 898 * system. We will not deadlock here because 899 * we are blocking waiting for I/O that is already in-progress 900 * to complete. We do not block here if it is the update 901 * or syncer daemon trying to clean up as that can lead 902 * to deadlock. 903 */ 904 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md) 905 waitrunningbufspace(); 906 } 907 908 return (0); 909} 910 911void 912bufbdflush(struct bufobj *bo, struct buf *bp) 913{ 914 struct buf *nbp; 915 916 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) { 917 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread); 918 altbufferflushes++; 919 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) { 920 BO_LOCK(bo); 921 /* 922 * Try to find a buffer to flush. 923 */ 924 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) { 925 if ((nbp->b_vflags & BV_BKGRDINPROG) || 926 BUF_LOCK(nbp, 927 LK_EXCLUSIVE | LK_NOWAIT, NULL)) 928 continue; 929 if (bp == nbp) 930 panic("bdwrite: found ourselves"); 931 BO_UNLOCK(bo); 932 /* Don't countdeps with the bo lock held. */ 933 if (buf_countdeps(nbp, 0)) { 934 BO_LOCK(bo); 935 BUF_UNLOCK(nbp); 936 continue; 937 } 938 if (nbp->b_flags & B_CLUSTEROK) { 939 vfs_bio_awrite(nbp); 940 } else { 941 bremfree(nbp); 942 bawrite(nbp); 943 } 944 dirtybufferflushes++; 945 break; 946 } 947 if (nbp == NULL) 948 BO_UNLOCK(bo); 949 } 950} 951 952/* 953 * Delayed write. (Buffer is marked dirty). Do not bother writing 954 * anything if the buffer is marked invalid. 955 * 956 * Note that since the buffer must be completely valid, we can safely 957 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 958 * biodone() in order to prevent getblk from writing the buffer 959 * out synchronously. 960 */ 961void 962bdwrite(struct buf *bp) 963{ 964 struct thread *td = curthread; 965 struct vnode *vp; 966 struct bufobj *bo; 967 968 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 969 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 970 BUF_ASSERT_HELD(bp); 971 972 if (bp->b_flags & B_INVAL) { 973 brelse(bp); 974 return; 975 } 976 977 /* 978 * If we have too many dirty buffers, don't create any more. 979 * If we are wildly over our limit, then force a complete 980 * cleanup. Otherwise, just keep the situation from getting 981 * out of control. Note that we have to avoid a recursive 982 * disaster and not try to clean up after our own cleanup! 983 */ 984 vp = bp->b_vp; 985 bo = bp->b_bufobj; 986 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) { 987 td->td_pflags |= TDP_INBDFLUSH; 988 BO_BDFLUSH(bo, bp); 989 td->td_pflags &= ~TDP_INBDFLUSH; 990 } else 991 recursiveflushes++; 992 993 bdirty(bp); 994 /* 995 * Set B_CACHE, indicating that the buffer is fully valid. This is 996 * true even of NFS now. 997 */ 998 bp->b_flags |= B_CACHE; 999 1000 /* 1001 * This bmap keeps the system from needing to do the bmap later, 1002 * perhaps when the system is attempting to do a sync. Since it 1003 * is likely that the indirect block -- or whatever other datastructure 1004 * that the filesystem needs is still in memory now, it is a good 1005 * thing to do this. Note also, that if the pageout daemon is 1006 * requesting a sync -- there might not be enough memory to do 1007 * the bmap then... So, this is important to do. 1008 */ 1009 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) { 1010 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 1011 } 1012 1013 /* 1014 * Set the *dirty* buffer range based upon the VM system dirty pages. 1015 */ 1016 vfs_setdirty(bp); 1017 1018 /* 1019 * We need to do this here to satisfy the vnode_pager and the 1020 * pageout daemon, so that it thinks that the pages have been 1021 * "cleaned". Note that since the pages are in a delayed write 1022 * buffer -- the VFS layer "will" see that the pages get written 1023 * out on the next sync, or perhaps the cluster will be completed. 1024 */ 1025 vfs_clean_pages(bp); 1026 bqrelse(bp); 1027 1028 /* 1029 * Wakeup the buffer flushing daemon if we have a lot of dirty 1030 * buffers (midpoint between our recovery point and our stall 1031 * point). 1032 */ 1033 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2); 1034 1035 /* 1036 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 1037 * due to the softdep code. 1038 */ 1039} 1040 1041/* 1042 * bdirty: 1043 * 1044 * Turn buffer into delayed write request. We must clear BIO_READ and 1045 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to 1046 * itself to properly update it in the dirty/clean lists. We mark it 1047 * B_DONE to ensure that any asynchronization of the buffer properly 1048 * clears B_DONE ( else a panic will occur later ). 1049 * 1050 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which 1051 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty() 1052 * should only be called if the buffer is known-good. 1053 * 1054 * Since the buffer is not on a queue, we do not update the numfreebuffers 1055 * count. 1056 * 1057 * The buffer must be on QUEUE_NONE. 1058 */ 1059void 1060bdirty(struct buf *bp) 1061{ 1062 1063 CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X", 1064 bp, bp->b_vp, bp->b_flags); 1065 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1066 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1067 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1068 BUF_ASSERT_HELD(bp); 1069 bp->b_flags &= ~(B_RELBUF); 1070 bp->b_iocmd = BIO_WRITE; 1071 1072 if ((bp->b_flags & B_DELWRI) == 0) { 1073 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI; 1074 reassignbuf(bp); 1075 atomic_add_int(&numdirtybuffers, 1); 1076 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2); 1077 } 1078} 1079 1080/* 1081 * bundirty: 1082 * 1083 * Clear B_DELWRI for buffer. 1084 * 1085 * Since the buffer is not on a queue, we do not update the numfreebuffers 1086 * count. 1087 * 1088 * The buffer must be on QUEUE_NONE. 1089 */ 1090 1091void 1092bundirty(struct buf *bp) 1093{ 1094 1095 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1096 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1097 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1098 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1099 BUF_ASSERT_HELD(bp); 1100 1101 if (bp->b_flags & B_DELWRI) { 1102 bp->b_flags &= ~B_DELWRI; 1103 reassignbuf(bp); 1104 atomic_subtract_int(&numdirtybuffers, 1); 1105 numdirtywakeup(lodirtybuffers); 1106 } 1107 /* 1108 * Since it is now being written, we can clear its deferred write flag. 1109 */ 1110 bp->b_flags &= ~B_DEFERRED; 1111} 1112 1113/* 1114 * bawrite: 1115 * 1116 * Asynchronous write. Start output on a buffer, but do not wait for 1117 * it to complete. The buffer is released when the output completes. 1118 * 1119 * bwrite() ( or the VOP routine anyway ) is responsible for handling 1120 * B_INVAL buffers. Not us. 1121 */ 1122void 1123bawrite(struct buf *bp) 1124{ 1125 1126 bp->b_flags |= B_ASYNC; 1127 (void) bwrite(bp); 1128} 1129 1130/* 1131 * bwillwrite: 1132 * 1133 * Called prior to the locking of any vnodes when we are expecting to 1134 * write. We do not want to starve the buffer cache with too many 1135 * dirty buffers so we block here. By blocking prior to the locking 1136 * of any vnodes we attempt to avoid the situation where a locked vnode 1137 * prevents the various system daemons from flushing related buffers. 1138 */ 1139 1140void 1141bwillwrite(void) 1142{ 1143 1144 if (numdirtybuffers >= hidirtybuffers) { 1145 mtx_lock(&nblock); 1146 while (numdirtybuffers >= hidirtybuffers) { 1147 bd_wakeup(1); 1148 needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH; 1149 msleep(&needsbuffer, &nblock, 1150 (PRIBIO + 4), "flswai", 0); 1151 } 1152 mtx_unlock(&nblock); 1153 } 1154} 1155 1156/* 1157 * Return true if we have too many dirty buffers. 1158 */ 1159int 1160buf_dirty_count_severe(void) 1161{ 1162 1163 return(numdirtybuffers >= hidirtybuffers); 1164} 1165 1166/* 1167 * brelse: 1168 * 1169 * Release a busy buffer and, if requested, free its resources. The 1170 * buffer will be stashed in the appropriate bufqueue[] allowing it 1171 * to be accessed later as a cache entity or reused for other purposes. 1172 */ 1173void 1174brelse(struct buf *bp) 1175{ 1176 CTR3(KTR_BUF, "brelse(%p) vp %p flags %X", 1177 bp, bp->b_vp, bp->b_flags); 1178 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1179 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1180 1181 if (bp->b_flags & B_MANAGED) { 1182 bqrelse(bp); 1183 return; 1184 } 1185 1186 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) && 1187 bp->b_error == EIO && !(bp->b_flags & B_INVAL)) { 1188 /* 1189 * Failed write, redirty. Must clear BIO_ERROR to prevent 1190 * pages from being scrapped. If the error is anything 1191 * other than an I/O error (EIO), assume that retryingi 1192 * is futile. 1193 */ 1194 bp->b_ioflags &= ~BIO_ERROR; 1195 bdirty(bp); 1196 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) || 1197 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) { 1198 /* 1199 * Either a failed I/O or we were asked to free or not 1200 * cache the buffer. 1201 */ 1202 bp->b_flags |= B_INVAL; 1203 if (!LIST_EMPTY(&bp->b_dep)) 1204 buf_deallocate(bp); 1205 if (bp->b_flags & B_DELWRI) { 1206 atomic_subtract_int(&numdirtybuffers, 1); 1207 numdirtywakeup(lodirtybuffers); 1208 } 1209 bp->b_flags &= ~(B_DELWRI | B_CACHE); 1210 if ((bp->b_flags & B_VMIO) == 0) { 1211 if (bp->b_bufsize) 1212 allocbuf(bp, 0); 1213 if (bp->b_vp) 1214 brelvp(bp); 1215 } 1216 } 1217 1218 /* 1219 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release() 1220 * is called with B_DELWRI set, the underlying pages may wind up 1221 * getting freed causing a previous write (bdwrite()) to get 'lost' 1222 * because pages associated with a B_DELWRI bp are marked clean. 1223 * 1224 * We still allow the B_INVAL case to call vfs_vmio_release(), even 1225 * if B_DELWRI is set. 1226 * 1227 * If B_DELWRI is not set we may have to set B_RELBUF if we are low 1228 * on pages to return pages to the VM page queues. 1229 */ 1230 if (bp->b_flags & B_DELWRI) 1231 bp->b_flags &= ~B_RELBUF; 1232 else if (vm_page_count_severe()) { 1233 /* 1234 * The locking of the BO_LOCK is not necessary since 1235 * BKGRDINPROG cannot be set while we hold the buf 1236 * lock, it can only be cleared if it is already 1237 * pending. 1238 */ 1239 if (bp->b_vp) { 1240 if (!(bp->b_vflags & BV_BKGRDINPROG)) 1241 bp->b_flags |= B_RELBUF; 1242 } else 1243 bp->b_flags |= B_RELBUF; 1244 } 1245 1246 /* 1247 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 1248 * constituted, not even NFS buffers now. Two flags effect this. If 1249 * B_INVAL, the struct buf is invalidated but the VM object is kept 1250 * around ( i.e. so it is trivial to reconstitute the buffer later ). 1251 * 1252 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be 1253 * invalidated. BIO_ERROR cannot be set for a failed write unless the 1254 * buffer is also B_INVAL because it hits the re-dirtying code above. 1255 * 1256 * Normally we can do this whether a buffer is B_DELWRI or not. If 1257 * the buffer is an NFS buffer, it is tracking piecemeal writes or 1258 * the commit state and we cannot afford to lose the buffer. If the 1259 * buffer has a background write in progress, we need to keep it 1260 * around to prevent it from being reconstituted and starting a second 1261 * background write. 1262 */ 1263 if ((bp->b_flags & B_VMIO) 1264 && !(bp->b_vp->v_mount != NULL && 1265 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 && 1266 !vn_isdisk(bp->b_vp, NULL) && 1267 (bp->b_flags & B_DELWRI)) 1268 ) { 1269 1270 int i, j, resid; 1271 vm_page_t m; 1272 off_t foff; 1273 vm_pindex_t poff; 1274 vm_object_t obj; 1275 1276 obj = bp->b_bufobj->bo_object; 1277 1278 /* 1279 * Get the base offset and length of the buffer. Note that 1280 * in the VMIO case if the buffer block size is not 1281 * page-aligned then b_data pointer may not be page-aligned. 1282 * But our b_pages[] array *IS* page aligned. 1283 * 1284 * block sizes less then DEV_BSIZE (usually 512) are not 1285 * supported due to the page granularity bits (m->valid, 1286 * m->dirty, etc...). 1287 * 1288 * See man buf(9) for more information 1289 */ 1290 resid = bp->b_bufsize; 1291 foff = bp->b_offset; 1292 VM_OBJECT_LOCK(obj); 1293 for (i = 0; i < bp->b_npages; i++) { 1294 int had_bogus = 0; 1295 1296 m = bp->b_pages[i]; 1297 1298 /* 1299 * If we hit a bogus page, fixup *all* the bogus pages 1300 * now. 1301 */ 1302 if (m == bogus_page) { 1303 poff = OFF_TO_IDX(bp->b_offset); 1304 had_bogus = 1; 1305 1306 for (j = i; j < bp->b_npages; j++) { 1307 vm_page_t mtmp; 1308 mtmp = bp->b_pages[j]; 1309 if (mtmp == bogus_page) { 1310 mtmp = vm_page_lookup(obj, poff + j); 1311 if (!mtmp) { 1312 panic("brelse: page missing\n"); 1313 } 1314 bp->b_pages[j] = mtmp; 1315 } 1316 } 1317 1318 if ((bp->b_flags & B_INVAL) == 0) { 1319 pmap_qenter( 1320 trunc_page((vm_offset_t)bp->b_data), 1321 bp->b_pages, bp->b_npages); 1322 } 1323 m = bp->b_pages[i]; 1324 } 1325 if ((bp->b_flags & B_NOCACHE) || 1326 (bp->b_ioflags & BIO_ERROR)) { 1327 int poffset = foff & PAGE_MASK; 1328 int presid = resid > (PAGE_SIZE - poffset) ? 1329 (PAGE_SIZE - poffset) : resid; 1330 1331 KASSERT(presid >= 0, ("brelse: extra page")); 1332 vm_page_lock_queues(); 1333 vm_page_set_invalid(m, poffset, presid); 1334 vm_page_unlock_queues(); 1335 if (had_bogus) 1336 printf("avoided corruption bug in bogus_page/brelse code\n"); 1337 } 1338 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1339 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 1340 } 1341 VM_OBJECT_UNLOCK(obj); 1342 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1343 vfs_vmio_release(bp); 1344 1345 } else if (bp->b_flags & B_VMIO) { 1346 1347 if (bp->b_flags & (B_INVAL | B_RELBUF)) { 1348 vfs_vmio_release(bp); 1349 } 1350 1351 } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) { 1352 if (bp->b_bufsize != 0) 1353 allocbuf(bp, 0); 1354 if (bp->b_vp != NULL) 1355 brelvp(bp); 1356 } 1357 1358 if (BUF_LOCKRECURSED(bp)) { 1359 /* do not release to free list */ 1360 BUF_UNLOCK(bp); 1361 return; 1362 } 1363 1364 /* enqueue */ 1365 mtx_lock(&bqlock); 1366 /* Handle delayed bremfree() processing. */ 1367 if (bp->b_flags & B_REMFREE) 1368 bremfreel(bp); 1369 if (bp->b_qindex != QUEUE_NONE) 1370 panic("brelse: free buffer onto another queue???"); 1371 1372 /* 1373 * If the buffer has junk contents signal it and eventually 1374 * clean up B_DELWRI and diassociate the vnode so that gbincore() 1375 * doesn't find it. 1376 */ 1377 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 || 1378 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0) 1379 bp->b_flags |= B_INVAL; 1380 if (bp->b_flags & B_INVAL) { 1381 if (bp->b_flags & B_DELWRI) 1382 bundirty(bp); 1383 if (bp->b_vp) 1384 brelvp(bp); 1385 } 1386 1387 /* buffers with no memory */ 1388 if (bp->b_bufsize == 0) { 1389 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1390 if (bp->b_vflags & BV_BKGRDINPROG) 1391 panic("losing buffer 1"); 1392 if (bp->b_kvasize) { 1393 bp->b_qindex = QUEUE_EMPTYKVA; 1394 } else { 1395 bp->b_qindex = QUEUE_EMPTY; 1396 } 1397 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 1398 /* buffers with junk contents */ 1399 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || 1400 (bp->b_ioflags & BIO_ERROR)) { 1401 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1402 if (bp->b_vflags & BV_BKGRDINPROG) 1403 panic("losing buffer 2"); 1404 bp->b_qindex = QUEUE_CLEAN; 1405 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist); 1406 /* remaining buffers */ 1407 } else { 1408 if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) == 1409 (B_DELWRI|B_NEEDSGIANT)) 1410 bp->b_qindex = QUEUE_DIRTY_GIANT; 1411 else if (bp->b_flags & B_DELWRI) 1412 bp->b_qindex = QUEUE_DIRTY; 1413 else 1414 bp->b_qindex = QUEUE_CLEAN; 1415 if (bp->b_flags & B_AGE) 1416 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 1417 else 1418 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); 1419 } 1420 mtx_unlock(&bqlock); 1421 1422 /* 1423 * Fixup numfreebuffers count. The bp is on an appropriate queue 1424 * unless locked. We then bump numfreebuffers if it is not B_DELWRI. 1425 * We've already handled the B_INVAL case ( B_DELWRI will be clear 1426 * if B_INVAL is set ). 1427 */ 1428 1429 if (!(bp->b_flags & B_DELWRI)) 1430 bufcountwakeup(); 1431 1432 /* 1433 * Something we can maybe free or reuse 1434 */ 1435 if (bp->b_bufsize || bp->b_kvasize) 1436 bufspacewakeup(); 1437 1438 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT); 1439 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) 1440 panic("brelse: not dirty"); 1441 /* unlock */ 1442 BUF_UNLOCK(bp); 1443} 1444 1445/* 1446 * Release a buffer back to the appropriate queue but do not try to free 1447 * it. The buffer is expected to be used again soon. 1448 * 1449 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1450 * biodone() to requeue an async I/O on completion. It is also used when 1451 * known good buffers need to be requeued but we think we may need the data 1452 * again soon. 1453 * 1454 * XXX we should be able to leave the B_RELBUF hint set on completion. 1455 */ 1456void 1457bqrelse(struct buf *bp) 1458{ 1459 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1460 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1461 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1462 1463 if (BUF_LOCKRECURSED(bp)) { 1464 /* do not release to free list */ 1465 BUF_UNLOCK(bp); 1466 return; 1467 } 1468 1469 if (bp->b_flags & B_MANAGED) { 1470 if (bp->b_flags & B_REMFREE) { 1471 mtx_lock(&bqlock); 1472 bremfreel(bp); 1473 mtx_unlock(&bqlock); 1474 } 1475 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1476 BUF_UNLOCK(bp); 1477 return; 1478 } 1479 1480 mtx_lock(&bqlock); 1481 /* Handle delayed bremfree() processing. */ 1482 if (bp->b_flags & B_REMFREE) 1483 bremfreel(bp); 1484 if (bp->b_qindex != QUEUE_NONE) 1485 panic("bqrelse: free buffer onto another queue???"); 1486 /* buffers with stale but valid contents */ 1487 if (bp->b_flags & B_DELWRI) { 1488 if (bp->b_flags & B_NEEDSGIANT) 1489 bp->b_qindex = QUEUE_DIRTY_GIANT; 1490 else 1491 bp->b_qindex = QUEUE_DIRTY; 1492 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); 1493 } else { 1494 /* 1495 * The locking of the BO_LOCK for checking of the 1496 * BV_BKGRDINPROG is not necessary since the 1497 * BV_BKGRDINPROG cannot be set while we hold the buf 1498 * lock, it can only be cleared if it is already 1499 * pending. 1500 */ 1501 if (!vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) { 1502 bp->b_qindex = QUEUE_CLEAN; 1503 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, 1504 b_freelist); 1505 } else { 1506 /* 1507 * We are too low on memory, we have to try to free 1508 * the buffer (most importantly: the wired pages 1509 * making up its backing store) *now*. 1510 */ 1511 mtx_unlock(&bqlock); 1512 brelse(bp); 1513 return; 1514 } 1515 } 1516 mtx_unlock(&bqlock); 1517 1518 if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) 1519 bufcountwakeup(); 1520 1521 /* 1522 * Something we can maybe free or reuse. 1523 */ 1524 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) 1525 bufspacewakeup(); 1526 1527 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1528 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) 1529 panic("bqrelse: not dirty"); 1530 /* unlock */ 1531 BUF_UNLOCK(bp); 1532} 1533 1534/* Give pages used by the bp back to the VM system (where possible) */ 1535static void 1536vfs_vmio_release(struct buf *bp) 1537{ 1538 int i; 1539 vm_page_t m; 1540 1541 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 1542 vm_page_lock_queues(); 1543 for (i = 0; i < bp->b_npages; i++) { 1544 m = bp->b_pages[i]; 1545 bp->b_pages[i] = NULL; 1546 /* 1547 * In order to keep page LRU ordering consistent, put 1548 * everything on the inactive queue. 1549 */ 1550 vm_page_unwire(m, 0); 1551 /* 1552 * We don't mess with busy pages, it is 1553 * the responsibility of the process that 1554 * busied the pages to deal with them. 1555 */ 1556 if ((m->oflags & VPO_BUSY) || (m->busy != 0)) 1557 continue; 1558 1559 if (m->wire_count == 0) { 1560 /* 1561 * Might as well free the page if we can and it has 1562 * no valid data. We also free the page if the 1563 * buffer was used for direct I/O 1564 */ 1565 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && 1566 m->hold_count == 0) { 1567 vm_page_free(m); 1568 } else if (bp->b_flags & B_DIRECT) { 1569 vm_page_try_to_free(m); 1570 } else if (vm_page_count_severe()) { 1571 vm_page_try_to_cache(m); 1572 } 1573 } 1574 } 1575 vm_page_unlock_queues(); 1576 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 1577 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 1578 1579 if (bp->b_bufsize) { 1580 bufspacewakeup(); 1581 bp->b_bufsize = 0; 1582 } 1583 bp->b_npages = 0; 1584 bp->b_flags &= ~B_VMIO; 1585 if (bp->b_vp) 1586 brelvp(bp); 1587} 1588 1589/* 1590 * Check to see if a block at a particular lbn is available for a clustered 1591 * write. 1592 */ 1593static int 1594vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno) 1595{ 1596 struct buf *bpa; 1597 int match; 1598 1599 match = 0; 1600 1601 /* If the buf isn't in core skip it */ 1602 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL) 1603 return (0); 1604 1605 /* If the buf is busy we don't want to wait for it */ 1606 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 1607 return (0); 1608 1609 /* Only cluster with valid clusterable delayed write buffers */ 1610 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) != 1611 (B_DELWRI | B_CLUSTEROK)) 1612 goto done; 1613 1614 if (bpa->b_bufsize != size) 1615 goto done; 1616 1617 /* 1618 * Check to see if it is in the expected place on disk and that the 1619 * block has been mapped. 1620 */ 1621 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno)) 1622 match = 1; 1623done: 1624 BUF_UNLOCK(bpa); 1625 return (match); 1626} 1627 1628/* 1629 * vfs_bio_awrite: 1630 * 1631 * Implement clustered async writes for clearing out B_DELWRI buffers. 1632 * This is much better then the old way of writing only one buffer at 1633 * a time. Note that we may not be presented with the buffers in the 1634 * correct order, so we search for the cluster in both directions. 1635 */ 1636int 1637vfs_bio_awrite(struct buf *bp) 1638{ 1639 struct bufobj *bo; 1640 int i; 1641 int j; 1642 daddr_t lblkno = bp->b_lblkno; 1643 struct vnode *vp = bp->b_vp; 1644 int ncl; 1645 int nwritten; 1646 int size; 1647 int maxcl; 1648 1649 bo = &vp->v_bufobj; 1650 /* 1651 * right now we support clustered writing only to regular files. If 1652 * we find a clusterable block we could be in the middle of a cluster 1653 * rather then at the beginning. 1654 */ 1655 if ((vp->v_type == VREG) && 1656 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1657 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1658 1659 size = vp->v_mount->mnt_stat.f_iosize; 1660 maxcl = MAXPHYS / size; 1661 1662 BO_LOCK(bo); 1663 for (i = 1; i < maxcl; i++) 1664 if (vfs_bio_clcheck(vp, size, lblkno + i, 1665 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0) 1666 break; 1667 1668 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 1669 if (vfs_bio_clcheck(vp, size, lblkno - j, 1670 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0) 1671 break; 1672 BO_UNLOCK(bo); 1673 --j; 1674 ncl = i + j; 1675 /* 1676 * this is a possible cluster write 1677 */ 1678 if (ncl != 1) { 1679 BUF_UNLOCK(bp); 1680 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl); 1681 return nwritten; 1682 } 1683 } 1684 bremfree(bp); 1685 bp->b_flags |= B_ASYNC; 1686 /* 1687 * default (old) behavior, writing out only one block 1688 * 1689 * XXX returns b_bufsize instead of b_bcount for nwritten? 1690 */ 1691 nwritten = bp->b_bufsize; 1692 (void) bwrite(bp); 1693 1694 return nwritten; 1695} 1696 1697/* 1698 * getnewbuf: 1699 * 1700 * Find and initialize a new buffer header, freeing up existing buffers 1701 * in the bufqueues as necessary. The new buffer is returned locked. 1702 * 1703 * Important: B_INVAL is not set. If the caller wishes to throw the 1704 * buffer away, the caller must set B_INVAL prior to calling brelse(). 1705 * 1706 * We block if: 1707 * We have insufficient buffer headers 1708 * We have insufficient buffer space 1709 * buffer_map is too fragmented ( space reservation fails ) 1710 * If we have to flush dirty buffers ( but we try to avoid this ) 1711 * 1712 * To avoid VFS layer recursion we do not flush dirty buffers ourselves. 1713 * Instead we ask the buf daemon to do it for us. We attempt to 1714 * avoid piecemeal wakeups of the pageout daemon. 1715 */ 1716 1717static struct buf * 1718getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize, 1719 int gbflags) 1720{ 1721 struct thread *td; 1722 struct buf *bp; 1723 struct buf *nbp; 1724 int defrag = 0; 1725 int nqindex; 1726 static int flushingbufs; 1727 1728 td = curthread; 1729 /* 1730 * We can't afford to block since we might be holding a vnode lock, 1731 * which may prevent system daemons from running. We deal with 1732 * low-memory situations by proactively returning memory and running 1733 * async I/O rather then sync I/O. 1734 */ 1735 atomic_add_int(&getnewbufcalls, 1); 1736 atomic_subtract_int(&getnewbufrestarts, 1); 1737restart: 1738 atomic_add_int(&getnewbufrestarts, 1); 1739 1740 /* 1741 * Setup for scan. If we do not have enough free buffers, 1742 * we setup a degenerate case that immediately fails. Note 1743 * that if we are specially marked process, we are allowed to 1744 * dip into our reserves. 1745 * 1746 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 1747 * 1748 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 1749 * However, there are a number of cases (defragging, reusing, ...) 1750 * where we cannot backup. 1751 */ 1752 mtx_lock(&bqlock); 1753 nqindex = QUEUE_EMPTYKVA; 1754 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 1755 1756 if (nbp == NULL) { 1757 /* 1758 * If no EMPTYKVA buffers and we are either 1759 * defragging or reusing, locate a CLEAN buffer 1760 * to free or reuse. If bufspace useage is low 1761 * skip this step so we can allocate a new buffer. 1762 */ 1763 if (defrag || bufspace >= lobufspace) { 1764 nqindex = QUEUE_CLEAN; 1765 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 1766 } 1767 1768 /* 1769 * If we could not find or were not allowed to reuse a 1770 * CLEAN buffer, check to see if it is ok to use an EMPTY 1771 * buffer. We can only use an EMPTY buffer if allocating 1772 * its KVA would not otherwise run us out of buffer space. 1773 */ 1774 if (nbp == NULL && defrag == 0 && 1775 bufspace + maxsize < hibufspace) { 1776 nqindex = QUEUE_EMPTY; 1777 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 1778 } 1779 } 1780 1781 /* 1782 * Run scan, possibly freeing data and/or kva mappings on the fly 1783 * depending. 1784 */ 1785 1786 while ((bp = nbp) != NULL) { 1787 int qindex = nqindex; 1788 1789 /* 1790 * Calculate next bp ( we can only use it if we do not block 1791 * or do other fancy things ). 1792 */ 1793 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 1794 switch(qindex) { 1795 case QUEUE_EMPTY: 1796 nqindex = QUEUE_EMPTYKVA; 1797 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]))) 1798 break; 1799 /* FALLTHROUGH */ 1800 case QUEUE_EMPTYKVA: 1801 nqindex = QUEUE_CLEAN; 1802 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]))) 1803 break; 1804 /* FALLTHROUGH */ 1805 case QUEUE_CLEAN: 1806 /* 1807 * nbp is NULL. 1808 */ 1809 break; 1810 } 1811 } 1812 /* 1813 * If we are defragging then we need a buffer with 1814 * b_kvasize != 0. XXX this situation should no longer 1815 * occur, if defrag is non-zero the buffer's b_kvasize 1816 * should also be non-zero at this point. XXX 1817 */ 1818 if (defrag && bp->b_kvasize == 0) { 1819 printf("Warning: defrag empty buffer %p\n", bp); 1820 continue; 1821 } 1822 1823 /* 1824 * Start freeing the bp. This is somewhat involved. nbp 1825 * remains valid only for QUEUE_EMPTY[KVA] bp's. 1826 */ 1827 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 1828 continue; 1829 if (bp->b_vp) { 1830 BO_LOCK(bp->b_bufobj); 1831 if (bp->b_vflags & BV_BKGRDINPROG) { 1832 BO_UNLOCK(bp->b_bufobj); 1833 BUF_UNLOCK(bp); 1834 continue; 1835 } 1836 BO_UNLOCK(bp->b_bufobj); 1837 } 1838 CTR6(KTR_BUF, 1839 "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d " 1840 "queue %d (recycling)", bp, bp->b_vp, bp->b_flags, 1841 bp->b_kvasize, bp->b_bufsize, qindex); 1842 1843 /* 1844 * Sanity Checks 1845 */ 1846 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp)); 1847 1848 /* 1849 * Note: we no longer distinguish between VMIO and non-VMIO 1850 * buffers. 1851 */ 1852 1853 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex)); 1854 1855 bremfreel(bp); 1856 mtx_unlock(&bqlock); 1857 1858 if (qindex == QUEUE_CLEAN) { 1859 if (bp->b_flags & B_VMIO) { 1860 bp->b_flags &= ~B_ASYNC; 1861 vfs_vmio_release(bp); 1862 } 1863 if (bp->b_vp) 1864 brelvp(bp); 1865 } 1866 1867 /* 1868 * NOTE: nbp is now entirely invalid. We can only restart 1869 * the scan from this point on. 1870 * 1871 * Get the rest of the buffer freed up. b_kva* is still 1872 * valid after this operation. 1873 */ 1874 1875 if (bp->b_rcred != NOCRED) { 1876 crfree(bp->b_rcred); 1877 bp->b_rcred = NOCRED; 1878 } 1879 if (bp->b_wcred != NOCRED) { 1880 crfree(bp->b_wcred); 1881 bp->b_wcred = NOCRED; 1882 } 1883 if (!LIST_EMPTY(&bp->b_dep)) 1884 buf_deallocate(bp); 1885 if (bp->b_vflags & BV_BKGRDINPROG) 1886 panic("losing buffer 3"); 1887 KASSERT(bp->b_vp == NULL, 1888 ("bp: %p still has vnode %p. qindex: %d", 1889 bp, bp->b_vp, qindex)); 1890 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0, 1891 ("bp: %p still on a buffer list. xflags %X", 1892 bp, bp->b_xflags)); 1893 1894 if (bp->b_bufsize) 1895 allocbuf(bp, 0); 1896 1897 bp->b_flags = 0; 1898 bp->b_ioflags = 0; 1899 bp->b_xflags = 0; 1900 bp->b_vflags = 0; 1901 bp->b_vp = NULL; 1902 bp->b_blkno = bp->b_lblkno = 0; 1903 bp->b_offset = NOOFFSET; 1904 bp->b_iodone = 0; 1905 bp->b_error = 0; 1906 bp->b_resid = 0; 1907 bp->b_bcount = 0; 1908 bp->b_npages = 0; 1909 bp->b_dirtyoff = bp->b_dirtyend = 0; 1910 bp->b_bufobj = NULL; 1911 bp->b_pin_count = 0; 1912 bp->b_fsprivate1 = NULL; 1913 bp->b_fsprivate2 = NULL; 1914 bp->b_fsprivate3 = NULL; 1915 1916 LIST_INIT(&bp->b_dep); 1917 1918 /* 1919 * If we are defragging then free the buffer. 1920 */ 1921 if (defrag) { 1922 bp->b_flags |= B_INVAL; 1923 bfreekva(bp); 1924 brelse(bp); 1925 defrag = 0; 1926 goto restart; 1927 } 1928 1929 /* 1930 * Notify any waiters for the buffer lock about 1931 * identity change by freeing the buffer. 1932 */ 1933 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) { 1934 bp->b_flags |= B_INVAL; 1935 bfreekva(bp); 1936 brelse(bp); 1937 goto restart; 1938 } 1939 1940 /* 1941 * If we are overcomitted then recover the buffer and its 1942 * KVM space. This occurs in rare situations when multiple 1943 * processes are blocked in getnewbuf() or allocbuf(). 1944 */ 1945 if (bufspace >= hibufspace) 1946 flushingbufs = 1; 1947 if (flushingbufs && bp->b_kvasize != 0) { 1948 bp->b_flags |= B_INVAL; 1949 bfreekva(bp); 1950 brelse(bp); 1951 goto restart; 1952 } 1953 if (bufspace < lobufspace) 1954 flushingbufs = 0; 1955 break; 1956 } 1957 1958 /* 1959 * If we exhausted our list, sleep as appropriate. We may have to 1960 * wakeup various daemons and write out some dirty buffers. 1961 * 1962 * Generally we are sleeping due to insufficient buffer space. 1963 */ 1964 1965 if (bp == NULL) { 1966 int flags, norunbuf; 1967 char *waitmsg; 1968 int fl; 1969 1970 if (defrag) { 1971 flags = VFS_BIO_NEED_BUFSPACE; 1972 waitmsg = "nbufkv"; 1973 } else if (bufspace >= hibufspace) { 1974 waitmsg = "nbufbs"; 1975 flags = VFS_BIO_NEED_BUFSPACE; 1976 } else { 1977 waitmsg = "newbuf"; 1978 flags = VFS_BIO_NEED_ANY; 1979 } 1980 mtx_lock(&nblock); 1981 needsbuffer |= flags; 1982 mtx_unlock(&nblock); 1983 mtx_unlock(&bqlock); 1984 1985 bd_speedup(); /* heeeelp */ 1986 if (gbflags & GB_NOWAIT_BD) 1987 return (NULL); 1988 1989 mtx_lock(&nblock); 1990 while (needsbuffer & flags) { 1991 if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) { 1992 mtx_unlock(&nblock); 1993 /* 1994 * getblk() is called with a vnode 1995 * locked, and some majority of the 1996 * dirty buffers may as well belong to 1997 * the vnode. Flushing the buffers 1998 * there would make a progress that 1999 * cannot be achieved by the 2000 * buf_daemon, that cannot lock the 2001 * vnode. 2002 */ 2003 norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) | 2004 (td->td_pflags & TDP_NORUNNINGBUF); 2005 /* play bufdaemon */ 2006 td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF; 2007 fl = buf_do_flush(vp); 2008 td->td_pflags &= norunbuf; 2009 mtx_lock(&nblock); 2010 if (fl != 0) 2011 continue; 2012 if ((needsbuffer & flags) == 0) 2013 break; 2014 } 2015 if (msleep(&needsbuffer, &nblock, 2016 (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) { 2017 mtx_unlock(&nblock); 2018 return (NULL); 2019 } 2020 } 2021 mtx_unlock(&nblock); 2022 } else { 2023 /* 2024 * We finally have a valid bp. We aren't quite out of the 2025 * woods, we still have to reserve kva space. In order 2026 * to keep fragmentation sane we only allocate kva in 2027 * BKVASIZE chunks. 2028 */ 2029 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 2030 2031 if (maxsize != bp->b_kvasize) { 2032 vm_offset_t addr = 0; 2033 2034 bfreekva(bp); 2035 2036 vm_map_lock(buffer_map); 2037 if (vm_map_findspace(buffer_map, 2038 vm_map_min(buffer_map), maxsize, &addr)) { 2039 /* 2040 * Uh oh. Buffer map is to fragmented. We 2041 * must defragment the map. 2042 */ 2043 atomic_add_int(&bufdefragcnt, 1); 2044 vm_map_unlock(buffer_map); 2045 defrag = 1; 2046 bp->b_flags |= B_INVAL; 2047 brelse(bp); 2048 goto restart; 2049 } 2050 if (addr) { 2051 vm_map_insert(buffer_map, NULL, 0, 2052 addr, addr + maxsize, 2053 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 2054 2055 bp->b_kvabase = (caddr_t) addr; 2056 bp->b_kvasize = maxsize; 2057 atomic_add_long(&bufspace, bp->b_kvasize); 2058 atomic_add_int(&bufreusecnt, 1); 2059 } 2060 vm_map_unlock(buffer_map); 2061 } 2062 bp->b_saveaddr = bp->b_kvabase; 2063 bp->b_data = bp->b_saveaddr; 2064 } 2065 return(bp); 2066} 2067 2068/* 2069 * buf_daemon: 2070 * 2071 * buffer flushing daemon. Buffers are normally flushed by the 2072 * update daemon but if it cannot keep up this process starts to 2073 * take the load in an attempt to prevent getnewbuf() from blocking. 2074 */ 2075 2076static struct kproc_desc buf_kp = { 2077 "bufdaemon", 2078 buf_daemon, 2079 &bufdaemonproc 2080}; 2081SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp); 2082 2083static int 2084buf_do_flush(struct vnode *vp) 2085{ 2086 int flushed; 2087 2088 flushed = flushbufqueues(vp, QUEUE_DIRTY, 0); 2089 /* The list empty check here is slightly racy */ 2090 if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) { 2091 mtx_lock(&Giant); 2092 flushed += flushbufqueues(vp, QUEUE_DIRTY_GIANT, 0); 2093 mtx_unlock(&Giant); 2094 } 2095 if (flushed == 0) { 2096 /* 2097 * Could not find any buffers without rollback 2098 * dependencies, so just write the first one 2099 * in the hopes of eventually making progress. 2100 */ 2101 flushbufqueues(vp, QUEUE_DIRTY, 1); 2102 if (!TAILQ_EMPTY( 2103 &bufqueues[QUEUE_DIRTY_GIANT])) { 2104 mtx_lock(&Giant); 2105 flushbufqueues(vp, QUEUE_DIRTY_GIANT, 1); 2106 mtx_unlock(&Giant); 2107 } 2108 } 2109 return (flushed); 2110} 2111 2112static void 2113buf_daemon() 2114{ 2115 2116 /* 2117 * This process needs to be suspended prior to shutdown sync. 2118 */ 2119 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc, 2120 SHUTDOWN_PRI_LAST); 2121 2122 /* 2123 * This process is allowed to take the buffer cache to the limit 2124 */ 2125 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED; 2126 mtx_lock(&bdlock); 2127 for (;;) { 2128 bd_request = 0; 2129 mtx_unlock(&bdlock); 2130 2131 kproc_suspend_check(bufdaemonproc); 2132 2133 /* 2134 * Do the flush. Limit the amount of in-transit I/O we 2135 * allow to build up, otherwise we would completely saturate 2136 * the I/O system. Wakeup any waiting processes before we 2137 * normally would so they can run in parallel with our drain. 2138 */ 2139 while (numdirtybuffers > lodirtybuffers) { 2140 if (buf_do_flush(NULL) == 0) 2141 break; 2142 uio_yield(); 2143 } 2144 2145 /* 2146 * Only clear bd_request if we have reached our low water 2147 * mark. The buf_daemon normally waits 1 second and 2148 * then incrementally flushes any dirty buffers that have 2149 * built up, within reason. 2150 * 2151 * If we were unable to hit our low water mark and couldn't 2152 * find any flushable buffers, we sleep half a second. 2153 * Otherwise we loop immediately. 2154 */ 2155 mtx_lock(&bdlock); 2156 if (numdirtybuffers <= lodirtybuffers) { 2157 /* 2158 * We reached our low water mark, reset the 2159 * request and sleep until we are needed again. 2160 * The sleep is just so the suspend code works. 2161 */ 2162 bd_request = 0; 2163 msleep(&bd_request, &bdlock, PVM, "psleep", hz); 2164 } else { 2165 /* 2166 * We couldn't find any flushable dirty buffers but 2167 * still have too many dirty buffers, we 2168 * have to sleep and try again. (rare) 2169 */ 2170 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10); 2171 } 2172 } 2173} 2174 2175/* 2176 * flushbufqueues: 2177 * 2178 * Try to flush a buffer in the dirty queue. We must be careful to 2179 * free up B_INVAL buffers instead of write them, which NFS is 2180 * particularly sensitive to. 2181 */ 2182static int flushwithdeps = 0; 2183SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps, 2184 0, "Number of buffers flushed with dependecies that require rollbacks"); 2185 2186static int 2187flushbufqueues(struct vnode *lvp, int queue, int flushdeps) 2188{ 2189 struct buf sentinel; 2190 struct vnode *vp; 2191 struct mount *mp; 2192 struct buf *bp; 2193 int hasdeps; 2194 int flushed; 2195 int target; 2196 2197 if (lvp == NULL) { 2198 target = numdirtybuffers - lodirtybuffers; 2199 if (flushdeps && target > 2) 2200 target /= 2; 2201 } else 2202 target = flushbufqtarget; 2203 flushed = 0; 2204 bp = NULL; 2205 sentinel.b_qindex = QUEUE_SENTINEL; 2206 mtx_lock(&bqlock); 2207 TAILQ_INSERT_HEAD(&bufqueues[queue], &sentinel, b_freelist); 2208 while (flushed != target) { 2209 bp = TAILQ_NEXT(&sentinel, b_freelist); 2210 if (bp != NULL) { 2211 TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist); 2212 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, &sentinel, 2213 b_freelist); 2214 } else 2215 break; 2216 /* 2217 * Skip sentinels inserted by other invocations of the 2218 * flushbufqueues(), taking care to not reorder them. 2219 */ 2220 if (bp->b_qindex == QUEUE_SENTINEL) 2221 continue; 2222 /* 2223 * Only flush the buffers that belong to the 2224 * vnode locked by the curthread. 2225 */ 2226 if (lvp != NULL && bp->b_vp != lvp) 2227 continue; 2228 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2229 continue; 2230 if (bp->b_pin_count > 0) { 2231 BUF_UNLOCK(bp); 2232 continue; 2233 } 2234 BO_LOCK(bp->b_bufobj); 2235 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 || 2236 (bp->b_flags & B_DELWRI) == 0) { 2237 BO_UNLOCK(bp->b_bufobj); 2238 BUF_UNLOCK(bp); 2239 continue; 2240 } 2241 BO_UNLOCK(bp->b_bufobj); 2242 if (bp->b_flags & B_INVAL) { 2243 bremfreel(bp); 2244 mtx_unlock(&bqlock); 2245 brelse(bp); 2246 flushed++; 2247 numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2); 2248 mtx_lock(&bqlock); 2249 continue; 2250 } 2251 2252 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) { 2253 if (flushdeps == 0) { 2254 BUF_UNLOCK(bp); 2255 continue; 2256 } 2257 hasdeps = 1; 2258 } else 2259 hasdeps = 0; 2260 /* 2261 * We must hold the lock on a vnode before writing 2262 * one of its buffers. Otherwise we may confuse, or 2263 * in the case of a snapshot vnode, deadlock the 2264 * system. 2265 * 2266 * The lock order here is the reverse of the normal 2267 * of vnode followed by buf lock. This is ok because 2268 * the NOWAIT will prevent deadlock. 2269 */ 2270 vp = bp->b_vp; 2271 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2272 BUF_UNLOCK(bp); 2273 continue; 2274 } 2275 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) { 2276 mtx_unlock(&bqlock); 2277 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X", 2278 bp, bp->b_vp, bp->b_flags); 2279 if (curproc == bufdaemonproc) 2280 vfs_bio_awrite(bp); 2281 else { 2282 bremfree(bp); 2283 bwrite(bp); 2284 } 2285 vn_finished_write(mp); 2286 VOP_UNLOCK(vp, 0); 2287 flushwithdeps += hasdeps; 2288 flushed++; 2289 2290 /* 2291 * Sleeping on runningbufspace while holding 2292 * vnode lock leads to deadlock. 2293 */ 2294 if (curproc == bufdaemonproc) 2295 waitrunningbufspace(); 2296 numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2); 2297 mtx_lock(&bqlock); 2298 continue; 2299 } 2300 vn_finished_write(mp); 2301 BUF_UNLOCK(bp); 2302 } 2303 TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist); 2304 mtx_unlock(&bqlock); 2305 return (flushed); 2306} 2307 2308/* 2309 * Check to see if a block is currently memory resident. 2310 */ 2311struct buf * 2312incore(struct bufobj *bo, daddr_t blkno) 2313{ 2314 struct buf *bp; 2315 2316 BO_LOCK(bo); 2317 bp = gbincore(bo, blkno); 2318 BO_UNLOCK(bo); 2319 return (bp); 2320} 2321 2322/* 2323 * Returns true if no I/O is needed to access the 2324 * associated VM object. This is like incore except 2325 * it also hunts around in the VM system for the data. 2326 */ 2327 2328static int 2329inmem(struct vnode * vp, daddr_t blkno) 2330{ 2331 vm_object_t obj; 2332 vm_offset_t toff, tinc, size; 2333 vm_page_t m; 2334 vm_ooffset_t off; 2335 2336 ASSERT_VOP_LOCKED(vp, "inmem"); 2337 2338 if (incore(&vp->v_bufobj, blkno)) 2339 return 1; 2340 if (vp->v_mount == NULL) 2341 return 0; 2342 obj = vp->v_object; 2343 if (obj == NULL) 2344 return (0); 2345 2346 size = PAGE_SIZE; 2347 if (size > vp->v_mount->mnt_stat.f_iosize) 2348 size = vp->v_mount->mnt_stat.f_iosize; 2349 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 2350 2351 VM_OBJECT_LOCK(obj); 2352 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2353 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 2354 if (!m) 2355 goto notinmem; 2356 tinc = size; 2357 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 2358 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 2359 if (vm_page_is_valid(m, 2360 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 2361 goto notinmem; 2362 } 2363 VM_OBJECT_UNLOCK(obj); 2364 return 1; 2365 2366notinmem: 2367 VM_OBJECT_UNLOCK(obj); 2368 return (0); 2369} 2370 2371/* 2372 * vfs_setdirty: 2373 * 2374 * Sets the dirty range for a buffer based on the status of the dirty 2375 * bits in the pages comprising the buffer. 2376 * 2377 * The range is limited to the size of the buffer. 2378 * 2379 * This routine is primarily used by NFS, but is generalized for the 2380 * B_VMIO case. 2381 */ 2382static void 2383vfs_setdirty(struct buf *bp) 2384{ 2385 2386 /* 2387 * Degenerate case - empty buffer 2388 */ 2389 2390 if (bp->b_bufsize == 0) 2391 return; 2392 2393 /* 2394 * We qualify the scan for modified pages on whether the 2395 * object has been flushed yet. 2396 */ 2397 2398 if ((bp->b_flags & B_VMIO) == 0) 2399 return; 2400 2401 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 2402 vfs_setdirty_locked_object(bp); 2403 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 2404} 2405 2406static void 2407vfs_setdirty_locked_object(struct buf *bp) 2408{ 2409 vm_object_t object; 2410 int i; 2411 2412 object = bp->b_bufobj->bo_object; 2413 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2414 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) { 2415 vm_offset_t boffset; 2416 vm_offset_t eoffset; 2417 2418 vm_page_lock_queues(); 2419 /* 2420 * test the pages to see if they have been modified directly 2421 * by users through the VM system. 2422 */ 2423 for (i = 0; i < bp->b_npages; i++) 2424 vm_page_test_dirty(bp->b_pages[i]); 2425 2426 /* 2427 * Calculate the encompassing dirty range, boffset and eoffset, 2428 * (eoffset - boffset) bytes. 2429 */ 2430 2431 for (i = 0; i < bp->b_npages; i++) { 2432 if (bp->b_pages[i]->dirty) 2433 break; 2434 } 2435 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2436 2437 for (i = bp->b_npages - 1; i >= 0; --i) { 2438 if (bp->b_pages[i]->dirty) { 2439 break; 2440 } 2441 } 2442 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2443 2444 vm_page_unlock_queues(); 2445 /* 2446 * Fit it to the buffer. 2447 */ 2448 2449 if (eoffset > bp->b_bcount) 2450 eoffset = bp->b_bcount; 2451 2452 /* 2453 * If we have a good dirty range, merge with the existing 2454 * dirty range. 2455 */ 2456 2457 if (boffset < eoffset) { 2458 if (bp->b_dirtyoff > boffset) 2459 bp->b_dirtyoff = boffset; 2460 if (bp->b_dirtyend < eoffset) 2461 bp->b_dirtyend = eoffset; 2462 } 2463 } 2464} 2465 2466/* 2467 * getblk: 2468 * 2469 * Get a block given a specified block and offset into a file/device. 2470 * The buffers B_DONE bit will be cleared on return, making it almost 2471 * ready for an I/O initiation. B_INVAL may or may not be set on 2472 * return. The caller should clear B_INVAL prior to initiating a 2473 * READ. 2474 * 2475 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2476 * an existing buffer. 2477 * 2478 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 2479 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 2480 * and then cleared based on the backing VM. If the previous buffer is 2481 * non-0-sized but invalid, B_CACHE will be cleared. 2482 * 2483 * If getblk() must create a new buffer, the new buffer is returned with 2484 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 2485 * case it is returned with B_INVAL clear and B_CACHE set based on the 2486 * backing VM. 2487 * 2488 * getblk() also forces a bwrite() for any B_DELWRI buffer whos 2489 * B_CACHE bit is clear. 2490 * 2491 * What this means, basically, is that the caller should use B_CACHE to 2492 * determine whether the buffer is fully valid or not and should clear 2493 * B_INVAL prior to issuing a read. If the caller intends to validate 2494 * the buffer by loading its data area with something, the caller needs 2495 * to clear B_INVAL. If the caller does this without issuing an I/O, 2496 * the caller should set B_CACHE ( as an optimization ), else the caller 2497 * should issue the I/O and biodone() will set B_CACHE if the I/O was 2498 * a write attempt or if it was a successfull read. If the caller 2499 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR 2500 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 2501 */ 2502struct buf * 2503getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo, 2504 int flags) 2505{ 2506 struct buf *bp; 2507 struct bufobj *bo; 2508 int error; 2509 2510 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); 2511 ASSERT_VOP_LOCKED(vp, "getblk"); 2512 if (size > MAXBSIZE) 2513 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 2514 2515 bo = &vp->v_bufobj; 2516loop: 2517 /* 2518 * Block if we are low on buffers. Certain processes are allowed 2519 * to completely exhaust the buffer cache. 2520 * 2521 * If this check ever becomes a bottleneck it may be better to 2522 * move it into the else, when gbincore() fails. At the moment 2523 * it isn't a problem. 2524 * 2525 * XXX remove if 0 sections (clean this up after its proven) 2526 */ 2527 if (numfreebuffers == 0) { 2528 if (TD_IS_IDLETHREAD(curthread)) 2529 return NULL; 2530 mtx_lock(&nblock); 2531 needsbuffer |= VFS_BIO_NEED_ANY; 2532 mtx_unlock(&nblock); 2533 } 2534 2535 BO_LOCK(bo); 2536 bp = gbincore(bo, blkno); 2537 if (bp != NULL) { 2538 int lockflags; 2539 /* 2540 * Buffer is in-core. If the buffer is not busy, it must 2541 * be on a queue. 2542 */ 2543 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; 2544 2545 if (flags & GB_LOCK_NOWAIT) 2546 lockflags |= LK_NOWAIT; 2547 2548 error = BUF_TIMELOCK(bp, lockflags, 2549 BO_MTX(bo), "getblk", slpflag, slptimeo); 2550 2551 /* 2552 * If we slept and got the lock we have to restart in case 2553 * the buffer changed identities. 2554 */ 2555 if (error == ENOLCK) 2556 goto loop; 2557 /* We timed out or were interrupted. */ 2558 else if (error) 2559 return (NULL); 2560 2561 /* 2562 * The buffer is locked. B_CACHE is cleared if the buffer is 2563 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set 2564 * and for a VMIO buffer B_CACHE is adjusted according to the 2565 * backing VM cache. 2566 */ 2567 if (bp->b_flags & B_INVAL) 2568 bp->b_flags &= ~B_CACHE; 2569 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0) 2570 bp->b_flags |= B_CACHE; 2571 bremfree(bp); 2572 2573 /* 2574 * check for size inconsistancies for non-VMIO case. 2575 */ 2576 2577 if (bp->b_bcount != size) { 2578 if ((bp->b_flags & B_VMIO) == 0 || 2579 (size > bp->b_kvasize)) { 2580 if (bp->b_flags & B_DELWRI) { 2581 /* 2582 * If buffer is pinned and caller does 2583 * not want sleep waiting for it to be 2584 * unpinned, bail out 2585 * */ 2586 if (bp->b_pin_count > 0) { 2587 if (flags & GB_LOCK_NOWAIT) { 2588 bqrelse(bp); 2589 return (NULL); 2590 } else { 2591 bunpin_wait(bp); 2592 } 2593 } 2594 bp->b_flags |= B_NOCACHE; 2595 bwrite(bp); 2596 } else { 2597 if (LIST_EMPTY(&bp->b_dep)) { 2598 bp->b_flags |= B_RELBUF; 2599 brelse(bp); 2600 } else { 2601 bp->b_flags |= B_NOCACHE; 2602 bwrite(bp); 2603 } 2604 } 2605 goto loop; 2606 } 2607 } 2608 2609 /* 2610 * If the size is inconsistant in the VMIO case, we can resize 2611 * the buffer. This might lead to B_CACHE getting set or 2612 * cleared. If the size has not changed, B_CACHE remains 2613 * unchanged from its previous state. 2614 */ 2615 2616 if (bp->b_bcount != size) 2617 allocbuf(bp, size); 2618 2619 KASSERT(bp->b_offset != NOOFFSET, 2620 ("getblk: no buffer offset")); 2621 2622 /* 2623 * A buffer with B_DELWRI set and B_CACHE clear must 2624 * be committed before we can return the buffer in 2625 * order to prevent the caller from issuing a read 2626 * ( due to B_CACHE not being set ) and overwriting 2627 * it. 2628 * 2629 * Most callers, including NFS and FFS, need this to 2630 * operate properly either because they assume they 2631 * can issue a read if B_CACHE is not set, or because 2632 * ( for example ) an uncached B_DELWRI might loop due 2633 * to softupdates re-dirtying the buffer. In the latter 2634 * case, B_CACHE is set after the first write completes, 2635 * preventing further loops. 2636 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 2637 * above while extending the buffer, we cannot allow the 2638 * buffer to remain with B_CACHE set after the write 2639 * completes or it will represent a corrupt state. To 2640 * deal with this we set B_NOCACHE to scrap the buffer 2641 * after the write. 2642 * 2643 * We might be able to do something fancy, like setting 2644 * B_CACHE in bwrite() except if B_DELWRI is already set, 2645 * so the below call doesn't set B_CACHE, but that gets real 2646 * confusing. This is much easier. 2647 */ 2648 2649 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 2650 bp->b_flags |= B_NOCACHE; 2651 bwrite(bp); 2652 goto loop; 2653 } 2654 bp->b_flags &= ~B_DONE; 2655 } else { 2656 int bsize, maxsize, vmio; 2657 off_t offset; 2658 2659 /* 2660 * Buffer is not in-core, create new buffer. The buffer 2661 * returned by getnewbuf() is locked. Note that the returned 2662 * buffer is also considered valid (not marked B_INVAL). 2663 */ 2664 BO_UNLOCK(bo); 2665 /* 2666 * If the user does not want us to create the buffer, bail out 2667 * here. 2668 */ 2669 if (flags & GB_NOCREAT) 2670 return NULL; 2671 bsize = bo->bo_bsize; 2672 offset = blkno * bsize; 2673 vmio = vp->v_object != NULL; 2674 maxsize = vmio ? size + (offset & PAGE_MASK) : size; 2675 maxsize = imax(maxsize, bsize); 2676 2677 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags); 2678 if (bp == NULL) { 2679 if (slpflag || slptimeo) 2680 return NULL; 2681 goto loop; 2682 } 2683 2684 /* 2685 * This code is used to make sure that a buffer is not 2686 * created while the getnewbuf routine is blocked. 2687 * This can be a problem whether the vnode is locked or not. 2688 * If the buffer is created out from under us, we have to 2689 * throw away the one we just created. 2690 * 2691 * Note: this must occur before we associate the buffer 2692 * with the vp especially considering limitations in 2693 * the splay tree implementation when dealing with duplicate 2694 * lblkno's. 2695 */ 2696 BO_LOCK(bo); 2697 if (gbincore(bo, blkno)) { 2698 BO_UNLOCK(bo); 2699 bp->b_flags |= B_INVAL; 2700 brelse(bp); 2701 goto loop; 2702 } 2703 2704 /* 2705 * Insert the buffer into the hash, so that it can 2706 * be found by incore. 2707 */ 2708 bp->b_blkno = bp->b_lblkno = blkno; 2709 bp->b_offset = offset; 2710 bgetvp(vp, bp); 2711 BO_UNLOCK(bo); 2712 2713 /* 2714 * set B_VMIO bit. allocbuf() the buffer bigger. Since the 2715 * buffer size starts out as 0, B_CACHE will be set by 2716 * allocbuf() for the VMIO case prior to it testing the 2717 * backing store for validity. 2718 */ 2719 2720 if (vmio) { 2721 bp->b_flags |= B_VMIO; 2722#if defined(VFS_BIO_DEBUG) 2723 if (vn_canvmio(vp) != TRUE) 2724 printf("getblk: VMIO on vnode type %d\n", 2725 vp->v_type); 2726#endif 2727 KASSERT(vp->v_object == bp->b_bufobj->bo_object, 2728 ("ARGH! different b_bufobj->bo_object %p %p %p\n", 2729 bp, vp->v_object, bp->b_bufobj->bo_object)); 2730 } else { 2731 bp->b_flags &= ~B_VMIO; 2732 KASSERT(bp->b_bufobj->bo_object == NULL, 2733 ("ARGH! has b_bufobj->bo_object %p %p\n", 2734 bp, bp->b_bufobj->bo_object)); 2735 } 2736 2737 allocbuf(bp, size); 2738 bp->b_flags &= ~B_DONE; 2739 } 2740 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp); 2741 BUF_ASSERT_HELD(bp); 2742 KASSERT(bp->b_bufobj == bo, 2743 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2744 return (bp); 2745} 2746 2747/* 2748 * Get an empty, disassociated buffer of given size. The buffer is initially 2749 * set to B_INVAL. 2750 */ 2751struct buf * 2752geteblk(int size, int flags) 2753{ 2754 struct buf *bp; 2755 int maxsize; 2756 2757 maxsize = (size + BKVAMASK) & ~BKVAMASK; 2758 while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) { 2759 if ((flags & GB_NOWAIT_BD) && 2760 (curthread->td_pflags & TDP_BUFNEED) != 0) 2761 return (NULL); 2762 } 2763 allocbuf(bp, size); 2764 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 2765 BUF_ASSERT_HELD(bp); 2766 return (bp); 2767} 2768 2769 2770/* 2771 * This code constitutes the buffer memory from either anonymous system 2772 * memory (in the case of non-VMIO operations) or from an associated 2773 * VM object (in the case of VMIO operations). This code is able to 2774 * resize a buffer up or down. 2775 * 2776 * Note that this code is tricky, and has many complications to resolve 2777 * deadlock or inconsistant data situations. Tread lightly!!! 2778 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 2779 * the caller. Calling this code willy nilly can result in the loss of data. 2780 * 2781 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 2782 * B_CACHE for the non-VMIO case. 2783 */ 2784 2785int 2786allocbuf(struct buf *bp, int size) 2787{ 2788 int newbsize, mbsize; 2789 int i; 2790 2791 BUF_ASSERT_HELD(bp); 2792 2793 if (bp->b_kvasize < size) 2794 panic("allocbuf: buffer too small"); 2795 2796 if ((bp->b_flags & B_VMIO) == 0) { 2797 caddr_t origbuf; 2798 int origbufsize; 2799 /* 2800 * Just get anonymous memory from the kernel. Don't 2801 * mess with B_CACHE. 2802 */ 2803 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2804 if (bp->b_flags & B_MALLOC) 2805 newbsize = mbsize; 2806 else 2807 newbsize = round_page(size); 2808 2809 if (newbsize < bp->b_bufsize) { 2810 /* 2811 * malloced buffers are not shrunk 2812 */ 2813 if (bp->b_flags & B_MALLOC) { 2814 if (newbsize) { 2815 bp->b_bcount = size; 2816 } else { 2817 free(bp->b_data, M_BIOBUF); 2818 if (bp->b_bufsize) { 2819 atomic_subtract_long( 2820 &bufmallocspace, 2821 bp->b_bufsize); 2822 bufspacewakeup(); 2823 bp->b_bufsize = 0; 2824 } 2825 bp->b_saveaddr = bp->b_kvabase; 2826 bp->b_data = bp->b_saveaddr; 2827 bp->b_bcount = 0; 2828 bp->b_flags &= ~B_MALLOC; 2829 } 2830 return 1; 2831 } 2832 vm_hold_free_pages( 2833 bp, 2834 (vm_offset_t) bp->b_data + newbsize, 2835 (vm_offset_t) bp->b_data + bp->b_bufsize); 2836 } else if (newbsize > bp->b_bufsize) { 2837 /* 2838 * We only use malloced memory on the first allocation. 2839 * and revert to page-allocated memory when the buffer 2840 * grows. 2841 */ 2842 /* 2843 * There is a potential smp race here that could lead 2844 * to bufmallocspace slightly passing the max. It 2845 * is probably extremely rare and not worth worrying 2846 * over. 2847 */ 2848 if ( (bufmallocspace < maxbufmallocspace) && 2849 (bp->b_bufsize == 0) && 2850 (mbsize <= PAGE_SIZE/2)) { 2851 2852 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 2853 bp->b_bufsize = mbsize; 2854 bp->b_bcount = size; 2855 bp->b_flags |= B_MALLOC; 2856 atomic_add_long(&bufmallocspace, mbsize); 2857 return 1; 2858 } 2859 origbuf = NULL; 2860 origbufsize = 0; 2861 /* 2862 * If the buffer is growing on its other-than-first allocation, 2863 * then we revert to the page-allocation scheme. 2864 */ 2865 if (bp->b_flags & B_MALLOC) { 2866 origbuf = bp->b_data; 2867 origbufsize = bp->b_bufsize; 2868 bp->b_data = bp->b_kvabase; 2869 if (bp->b_bufsize) { 2870 atomic_subtract_long(&bufmallocspace, 2871 bp->b_bufsize); 2872 bufspacewakeup(); 2873 bp->b_bufsize = 0; 2874 } 2875 bp->b_flags &= ~B_MALLOC; 2876 newbsize = round_page(newbsize); 2877 } 2878 vm_hold_load_pages( 2879 bp, 2880 (vm_offset_t) bp->b_data + bp->b_bufsize, 2881 (vm_offset_t) bp->b_data + newbsize); 2882 if (origbuf) { 2883 bcopy(origbuf, bp->b_data, origbufsize); 2884 free(origbuf, M_BIOBUF); 2885 } 2886 } 2887 } else { 2888 int desiredpages; 2889 2890 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2891 desiredpages = (size == 0) ? 0 : 2892 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 2893 2894 if (bp->b_flags & B_MALLOC) 2895 panic("allocbuf: VMIO buffer can't be malloced"); 2896 /* 2897 * Set B_CACHE initially if buffer is 0 length or will become 2898 * 0-length. 2899 */ 2900 if (size == 0 || bp->b_bufsize == 0) 2901 bp->b_flags |= B_CACHE; 2902 2903 if (newbsize < bp->b_bufsize) { 2904 /* 2905 * DEV_BSIZE aligned new buffer size is less then the 2906 * DEV_BSIZE aligned existing buffer size. Figure out 2907 * if we have to remove any pages. 2908 */ 2909 if (desiredpages < bp->b_npages) { 2910 vm_page_t m; 2911 2912 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 2913 vm_page_lock_queues(); 2914 for (i = desiredpages; i < bp->b_npages; i++) { 2915 /* 2916 * the page is not freed here -- it 2917 * is the responsibility of 2918 * vnode_pager_setsize 2919 */ 2920 m = bp->b_pages[i]; 2921 KASSERT(m != bogus_page, 2922 ("allocbuf: bogus page found")); 2923 while (vm_page_sleep_if_busy(m, TRUE, "biodep")) 2924 vm_page_lock_queues(); 2925 2926 bp->b_pages[i] = NULL; 2927 vm_page_unwire(m, 0); 2928 } 2929 vm_page_unlock_queues(); 2930 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 2931 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 2932 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 2933 bp->b_npages = desiredpages; 2934 } 2935 } else if (size > bp->b_bcount) { 2936 /* 2937 * We are growing the buffer, possibly in a 2938 * byte-granular fashion. 2939 */ 2940 struct vnode *vp; 2941 vm_object_t obj; 2942 vm_offset_t toff; 2943 vm_offset_t tinc; 2944 2945 /* 2946 * Step 1, bring in the VM pages from the object, 2947 * allocating them if necessary. We must clear 2948 * B_CACHE if these pages are not valid for the 2949 * range covered by the buffer. 2950 */ 2951 2952 vp = bp->b_vp; 2953 obj = bp->b_bufobj->bo_object; 2954 2955 VM_OBJECT_LOCK(obj); 2956 while (bp->b_npages < desiredpages) { 2957 vm_page_t m; 2958 vm_pindex_t pi; 2959 2960 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages; 2961 if ((m = vm_page_lookup(obj, pi)) == NULL) { 2962 /* 2963 * note: must allocate system pages 2964 * since blocking here could intefere 2965 * with paging I/O, no matter which 2966 * process we are. 2967 */ 2968 m = vm_page_alloc(obj, pi, 2969 VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | 2970 VM_ALLOC_WIRED); 2971 if (m == NULL) { 2972 atomic_add_int(&vm_pageout_deficit, 2973 desiredpages - bp->b_npages); 2974 VM_OBJECT_UNLOCK(obj); 2975 VM_WAIT; 2976 VM_OBJECT_LOCK(obj); 2977 } else { 2978 if (m->valid == 0) 2979 bp->b_flags &= ~B_CACHE; 2980 bp->b_pages[bp->b_npages] = m; 2981 ++bp->b_npages; 2982 } 2983 continue; 2984 } 2985 2986 /* 2987 * We found a page. If we have to sleep on it, 2988 * retry because it might have gotten freed out 2989 * from under us. 2990 * 2991 * We can only test VPO_BUSY here. Blocking on 2992 * m->busy might lead to a deadlock: 2993 * 2994 * vm_fault->getpages->cluster_read->allocbuf 2995 * 2996 */ 2997 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk")) 2998 continue; 2999 3000 /* 3001 * We have a good page. 3002 */ 3003 vm_page_lock_queues(); 3004 vm_page_wire(m); 3005 vm_page_unlock_queues(); 3006 bp->b_pages[bp->b_npages] = m; 3007 ++bp->b_npages; 3008 } 3009 3010 /* 3011 * Step 2. We've loaded the pages into the buffer, 3012 * we have to figure out if we can still have B_CACHE 3013 * set. Note that B_CACHE is set according to the 3014 * byte-granular range ( bcount and size ), new the 3015 * aligned range ( newbsize ). 3016 * 3017 * The VM test is against m->valid, which is DEV_BSIZE 3018 * aligned. Needless to say, the validity of the data 3019 * needs to also be DEV_BSIZE aligned. Note that this 3020 * fails with NFS if the server or some other client 3021 * extends the file's EOF. If our buffer is resized, 3022 * B_CACHE may remain set! XXX 3023 */ 3024 3025 toff = bp->b_bcount; 3026 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 3027 3028 while ((bp->b_flags & B_CACHE) && toff < size) { 3029 vm_pindex_t pi; 3030 3031 if (tinc > (size - toff)) 3032 tinc = size - toff; 3033 3034 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 3035 PAGE_SHIFT; 3036 3037 vfs_buf_test_cache( 3038 bp, 3039 bp->b_offset, 3040 toff, 3041 tinc, 3042 bp->b_pages[pi] 3043 ); 3044 toff += tinc; 3045 tinc = PAGE_SIZE; 3046 } 3047 VM_OBJECT_UNLOCK(obj); 3048 3049 /* 3050 * Step 3, fixup the KVM pmap. Remember that 3051 * bp->b_data is relative to bp->b_offset, but 3052 * bp->b_offset may be offset into the first page. 3053 */ 3054 3055 bp->b_data = (caddr_t) 3056 trunc_page((vm_offset_t)bp->b_data); 3057 pmap_qenter( 3058 (vm_offset_t)bp->b_data, 3059 bp->b_pages, 3060 bp->b_npages 3061 ); 3062 3063 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 3064 (vm_offset_t)(bp->b_offset & PAGE_MASK)); 3065 } 3066 } 3067 if (newbsize < bp->b_bufsize) 3068 bufspacewakeup(); 3069 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3070 bp->b_bcount = size; /* requested buffer size */ 3071 return 1; 3072} 3073 3074void 3075biodone(struct bio *bp) 3076{ 3077 struct mtx *mtxp; 3078 void (*done)(struct bio *); 3079 3080 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3081 mtx_lock(mtxp); 3082 bp->bio_flags |= BIO_DONE; 3083 done = bp->bio_done; 3084 if (done == NULL) 3085 wakeup(bp); 3086 mtx_unlock(mtxp); 3087 if (done != NULL) 3088 done(bp); 3089} 3090 3091/* 3092 * Wait for a BIO to finish. 3093 * 3094 * XXX: resort to a timeout for now. The optimal locking (if any) for this 3095 * case is not yet clear. 3096 */ 3097int 3098biowait(struct bio *bp, const char *wchan) 3099{ 3100 struct mtx *mtxp; 3101 3102 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3103 mtx_lock(mtxp); 3104 while ((bp->bio_flags & BIO_DONE) == 0) 3105 msleep(bp, mtxp, PRIBIO, wchan, hz / 10); 3106 mtx_unlock(mtxp); 3107 if (bp->bio_error != 0) 3108 return (bp->bio_error); 3109 if (!(bp->bio_flags & BIO_ERROR)) 3110 return (0); 3111 return (EIO); 3112} 3113 3114void 3115biofinish(struct bio *bp, struct devstat *stat, int error) 3116{ 3117 3118 if (error) { 3119 bp->bio_error = error; 3120 bp->bio_flags |= BIO_ERROR; 3121 } 3122 if (stat != NULL) 3123 devstat_end_transaction_bio(stat, bp); 3124 biodone(bp); 3125} 3126 3127/* 3128 * bufwait: 3129 * 3130 * Wait for buffer I/O completion, returning error status. The buffer 3131 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR 3132 * error and cleared. 3133 */ 3134int 3135bufwait(struct buf *bp) 3136{ 3137 if (bp->b_iocmd == BIO_READ) 3138 bwait(bp, PRIBIO, "biord"); 3139 else 3140 bwait(bp, PRIBIO, "biowr"); 3141 if (bp->b_flags & B_EINTR) { 3142 bp->b_flags &= ~B_EINTR; 3143 return (EINTR); 3144 } 3145 if (bp->b_ioflags & BIO_ERROR) { 3146 return (bp->b_error ? bp->b_error : EIO); 3147 } else { 3148 return (0); 3149 } 3150} 3151 3152 /* 3153 * Call back function from struct bio back up to struct buf. 3154 */ 3155static void 3156bufdonebio(struct bio *bip) 3157{ 3158 struct buf *bp; 3159 3160 bp = bip->bio_caller2; 3161 bp->b_resid = bp->b_bcount - bip->bio_completed; 3162 bp->b_resid = bip->bio_resid; /* XXX: remove */ 3163 bp->b_ioflags = bip->bio_flags; 3164 bp->b_error = bip->bio_error; 3165 if (bp->b_error) 3166 bp->b_ioflags |= BIO_ERROR; 3167 bufdone(bp); 3168 g_destroy_bio(bip); 3169} 3170 3171void 3172dev_strategy(struct cdev *dev, struct buf *bp) 3173{ 3174 struct cdevsw *csw; 3175 struct bio *bip; 3176 3177 if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1))) 3178 panic("b_iocmd botch"); 3179 for (;;) { 3180 bip = g_new_bio(); 3181 if (bip != NULL) 3182 break; 3183 /* Try again later */ 3184 tsleep(&bp, PRIBIO, "dev_strat", hz/10); 3185 } 3186 bip->bio_cmd = bp->b_iocmd; 3187 bip->bio_offset = bp->b_iooffset; 3188 bip->bio_length = bp->b_bcount; 3189 bip->bio_bcount = bp->b_bcount; /* XXX: remove */ 3190 bip->bio_data = bp->b_data; 3191 bip->bio_done = bufdonebio; 3192 bip->bio_caller2 = bp; 3193 bip->bio_dev = dev; 3194 KASSERT(dev->si_refcount > 0, 3195 ("dev_strategy on un-referenced struct cdev *(%s)", 3196 devtoname(dev))); 3197 csw = dev_refthread(dev); 3198 if (csw == NULL) { 3199 g_destroy_bio(bip); 3200 bp->b_error = ENXIO; 3201 bp->b_ioflags = BIO_ERROR; 3202 bufdone(bp); 3203 return; 3204 } 3205 (*csw->d_strategy)(bip); 3206 dev_relthread(dev); 3207} 3208 3209/* 3210 * bufdone: 3211 * 3212 * Finish I/O on a buffer, optionally calling a completion function. 3213 * This is usually called from an interrupt so process blocking is 3214 * not allowed. 3215 * 3216 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 3217 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3218 * assuming B_INVAL is clear. 3219 * 3220 * For the VMIO case, we set B_CACHE if the op was a read and no 3221 * read error occured, or if the op was a write. B_CACHE is never 3222 * set if the buffer is invalid or otherwise uncacheable. 3223 * 3224 * biodone does not mess with B_INVAL, allowing the I/O routine or the 3225 * initiator to leave B_INVAL set to brelse the buffer out of existance 3226 * in the biodone routine. 3227 */ 3228void 3229bufdone(struct buf *bp) 3230{ 3231 struct bufobj *dropobj; 3232 void (*biodone)(struct buf *); 3233 3234 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 3235 dropobj = NULL; 3236 3237 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 3238 BUF_ASSERT_HELD(bp); 3239 3240 runningbufwakeup(bp); 3241 if (bp->b_iocmd == BIO_WRITE) 3242 dropobj = bp->b_bufobj; 3243 /* call optional completion function if requested */ 3244 if (bp->b_iodone != NULL) { 3245 biodone = bp->b_iodone; 3246 bp->b_iodone = NULL; 3247 (*biodone) (bp); 3248 if (dropobj) 3249 bufobj_wdrop(dropobj); 3250 return; 3251 } 3252 3253 bufdone_finish(bp); 3254 3255 if (dropobj) 3256 bufobj_wdrop(dropobj); 3257} 3258 3259void 3260bufdone_finish(struct buf *bp) 3261{ 3262 BUF_ASSERT_HELD(bp); 3263 3264 if (!LIST_EMPTY(&bp->b_dep)) 3265 buf_complete(bp); 3266 3267 if (bp->b_flags & B_VMIO) { 3268 int i; 3269 vm_ooffset_t foff; 3270 vm_page_t m; 3271 vm_object_t obj; 3272 int iosize; 3273 struct vnode *vp = bp->b_vp; 3274 boolean_t are_queues_locked; 3275 3276 obj = bp->b_bufobj->bo_object; 3277 3278#if defined(VFS_BIO_DEBUG) 3279 mp_fixme("usecount and vflag accessed without locks."); 3280 if (vp->v_usecount == 0) { 3281 panic("biodone: zero vnode ref count"); 3282 } 3283 3284 KASSERT(vp->v_object != NULL, 3285 ("biodone: vnode %p has no vm_object", vp)); 3286#endif 3287 3288 foff = bp->b_offset; 3289 KASSERT(bp->b_offset != NOOFFSET, 3290 ("biodone: no buffer offset")); 3291 3292 VM_OBJECT_LOCK(obj); 3293#if defined(VFS_BIO_DEBUG) 3294 if (obj->paging_in_progress < bp->b_npages) { 3295 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 3296 obj->paging_in_progress, bp->b_npages); 3297 } 3298#endif 3299 3300 /* 3301 * Set B_CACHE if the op was a normal read and no error 3302 * occured. B_CACHE is set for writes in the b*write() 3303 * routines. 3304 */ 3305 iosize = bp->b_bcount - bp->b_resid; 3306 if (bp->b_iocmd == BIO_READ && 3307 !(bp->b_flags & (B_INVAL|B_NOCACHE)) && 3308 !(bp->b_ioflags & BIO_ERROR)) { 3309 bp->b_flags |= B_CACHE; 3310 } 3311 if (bp->b_iocmd == BIO_READ) { 3312 vm_page_lock_queues(); 3313 are_queues_locked = TRUE; 3314 } else 3315 are_queues_locked = FALSE; 3316 for (i = 0; i < bp->b_npages; i++) { 3317 int bogusflag = 0; 3318 int resid; 3319 3320 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3321 if (resid > iosize) 3322 resid = iosize; 3323 3324 /* 3325 * cleanup bogus pages, restoring the originals 3326 */ 3327 m = bp->b_pages[i]; 3328 if (m == bogus_page) { 3329 bogusflag = 1; 3330 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3331 if (m == NULL) 3332 panic("biodone: page disappeared!"); 3333 bp->b_pages[i] = m; 3334 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3335 bp->b_pages, bp->b_npages); 3336 } 3337#if defined(VFS_BIO_DEBUG) 3338 if (OFF_TO_IDX(foff) != m->pindex) { 3339 printf( 3340"biodone: foff(%jd)/m->pindex(%ju) mismatch\n", 3341 (intmax_t)foff, (uintmax_t)m->pindex); 3342 } 3343#endif 3344 3345 /* 3346 * In the write case, the valid and clean bits are 3347 * already changed correctly ( see bdwrite() ), so we 3348 * only need to do this here in the read case. 3349 */ 3350 if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) { 3351 vfs_page_set_valid(bp, foff, m); 3352 } 3353 3354 /* 3355 * when debugging new filesystems or buffer I/O methods, this 3356 * is the most common error that pops up. if you see this, you 3357 * have not set the page busy flag correctly!!! 3358 */ 3359 if (m->busy == 0) { 3360 printf("biodone: page busy < 0, " 3361 "pindex: %d, foff: 0x(%x,%x), " 3362 "resid: %d, index: %d\n", 3363 (int) m->pindex, (int)(foff >> 32), 3364 (int) foff & 0xffffffff, resid, i); 3365 if (!vn_isdisk(vp, NULL)) 3366 printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n", 3367 (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize, 3368 (intmax_t) bp->b_lblkno, 3369 bp->b_flags, bp->b_npages); 3370 else 3371 printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n", 3372 (intmax_t) bp->b_lblkno, 3373 bp->b_flags, bp->b_npages); 3374 printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n", 3375 (u_long)m->valid, (u_long)m->dirty, 3376 m->wire_count); 3377 panic("biodone: page busy < 0\n"); 3378 } 3379 vm_page_io_finish(m); 3380 vm_object_pip_subtract(obj, 1); 3381 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3382 iosize -= resid; 3383 } 3384 if (are_queues_locked) 3385 vm_page_unlock_queues(); 3386 vm_object_pip_wakeupn(obj, 0); 3387 VM_OBJECT_UNLOCK(obj); 3388 } 3389 3390 /* 3391 * For asynchronous completions, release the buffer now. The brelse 3392 * will do a wakeup there if necessary - so no need to do a wakeup 3393 * here in the async case. The sync case always needs to do a wakeup. 3394 */ 3395 3396 if (bp->b_flags & B_ASYNC) { 3397 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR)) 3398 brelse(bp); 3399 else 3400 bqrelse(bp); 3401 } else 3402 bdone(bp); 3403} 3404 3405/* 3406 * This routine is called in lieu of iodone in the case of 3407 * incomplete I/O. This keeps the busy status for pages 3408 * consistant. 3409 */ 3410void 3411vfs_unbusy_pages(struct buf *bp) 3412{ 3413 int i; 3414 vm_object_t obj; 3415 vm_page_t m; 3416 3417 runningbufwakeup(bp); 3418 if (!(bp->b_flags & B_VMIO)) 3419 return; 3420 3421 obj = bp->b_bufobj->bo_object; 3422 VM_OBJECT_LOCK(obj); 3423 for (i = 0; i < bp->b_npages; i++) { 3424 m = bp->b_pages[i]; 3425 if (m == bogus_page) { 3426 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 3427 if (!m) 3428 panic("vfs_unbusy_pages: page missing\n"); 3429 bp->b_pages[i] = m; 3430 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3431 bp->b_pages, bp->b_npages); 3432 } 3433 vm_object_pip_subtract(obj, 1); 3434 vm_page_io_finish(m); 3435 } 3436 vm_object_pip_wakeupn(obj, 0); 3437 VM_OBJECT_UNLOCK(obj); 3438} 3439 3440/* 3441 * vfs_page_set_valid: 3442 * 3443 * Set the valid bits in a page based on the supplied offset. The 3444 * range is restricted to the buffer's size. 3445 * 3446 * This routine is typically called after a read completes. 3447 */ 3448static void 3449vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3450{ 3451 vm_ooffset_t soff, eoff; 3452 3453 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3454 /* 3455 * Start and end offsets in buffer. eoff - soff may not cross a 3456 * page boundry or cross the end of the buffer. The end of the 3457 * buffer, in this case, is our file EOF, not the allocation size 3458 * of the buffer. 3459 */ 3460 soff = off; 3461 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3462 if (eoff > bp->b_offset + bp->b_bcount) 3463 eoff = bp->b_offset + bp->b_bcount; 3464 3465 /* 3466 * Set valid range. This is typically the entire buffer and thus the 3467 * entire page. 3468 */ 3469 if (eoff > soff) { 3470 vm_page_set_validclean( 3471 m, 3472 (vm_offset_t) (soff & PAGE_MASK), 3473 (vm_offset_t) (eoff - soff) 3474 ); 3475 } 3476} 3477 3478/* 3479 * This routine is called before a device strategy routine. 3480 * It is used to tell the VM system that paging I/O is in 3481 * progress, and treat the pages associated with the buffer 3482 * almost as being VPO_BUSY. Also the object paging_in_progress 3483 * flag is handled to make sure that the object doesn't become 3484 * inconsistant. 3485 * 3486 * Since I/O has not been initiated yet, certain buffer flags 3487 * such as BIO_ERROR or B_INVAL may be in an inconsistant state 3488 * and should be ignored. 3489 */ 3490void 3491vfs_busy_pages(struct buf *bp, int clear_modify) 3492{ 3493 int i, bogus; 3494 vm_object_t obj; 3495 vm_ooffset_t foff; 3496 vm_page_t m; 3497 3498 if (!(bp->b_flags & B_VMIO)) 3499 return; 3500 3501 obj = bp->b_bufobj->bo_object; 3502 foff = bp->b_offset; 3503 KASSERT(bp->b_offset != NOOFFSET, 3504 ("vfs_busy_pages: no buffer offset")); 3505 VM_OBJECT_LOCK(obj); 3506 if (bp->b_bufsize != 0) 3507 vfs_setdirty_locked_object(bp); 3508retry: 3509 for (i = 0; i < bp->b_npages; i++) { 3510 m = bp->b_pages[i]; 3511 3512 if (vm_page_sleep_if_busy(m, FALSE, "vbpage")) 3513 goto retry; 3514 } 3515 bogus = 0; 3516 vm_page_lock_queues(); 3517 for (i = 0; i < bp->b_npages; i++) { 3518 m = bp->b_pages[i]; 3519 3520 if ((bp->b_flags & B_CLUSTER) == 0) { 3521 vm_object_pip_add(obj, 1); 3522 vm_page_io_start(m); 3523 } 3524 /* 3525 * When readying a buffer for a read ( i.e 3526 * clear_modify == 0 ), it is important to do 3527 * bogus_page replacement for valid pages in 3528 * partially instantiated buffers. Partially 3529 * instantiated buffers can, in turn, occur when 3530 * reconstituting a buffer from its VM backing store 3531 * base. We only have to do this if B_CACHE is 3532 * clear ( which causes the I/O to occur in the 3533 * first place ). The replacement prevents the read 3534 * I/O from overwriting potentially dirty VM-backed 3535 * pages. XXX bogus page replacement is, uh, bogus. 3536 * It may not work properly with small-block devices. 3537 * We need to find a better way. 3538 */ 3539 pmap_remove_all(m); 3540 if (clear_modify) 3541 vfs_page_set_valid(bp, foff, m); 3542 else if (m->valid == VM_PAGE_BITS_ALL && 3543 (bp->b_flags & B_CACHE) == 0) { 3544 bp->b_pages[i] = bogus_page; 3545 bogus++; 3546 } 3547 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3548 } 3549 vm_page_unlock_queues(); 3550 VM_OBJECT_UNLOCK(obj); 3551 if (bogus) 3552 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3553 bp->b_pages, bp->b_npages); 3554} 3555 3556/* 3557 * Tell the VM system that the pages associated with this buffer 3558 * are clean. This is used for delayed writes where the data is 3559 * going to go to disk eventually without additional VM intevention. 3560 * 3561 * Note that while we only really need to clean through to b_bcount, we 3562 * just go ahead and clean through to b_bufsize. 3563 */ 3564static void 3565vfs_clean_pages(struct buf *bp) 3566{ 3567 int i; 3568 vm_ooffset_t foff, noff, eoff; 3569 vm_page_t m; 3570 3571 if (!(bp->b_flags & B_VMIO)) 3572 return; 3573 3574 foff = bp->b_offset; 3575 KASSERT(bp->b_offset != NOOFFSET, 3576 ("vfs_clean_pages: no buffer offset")); 3577 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 3578 vm_page_lock_queues(); 3579 for (i = 0; i < bp->b_npages; i++) { 3580 m = bp->b_pages[i]; 3581 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3582 eoff = noff; 3583 3584 if (eoff > bp->b_offset + bp->b_bufsize) 3585 eoff = bp->b_offset + bp->b_bufsize; 3586 vfs_page_set_valid(bp, foff, m); 3587 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 3588 foff = noff; 3589 } 3590 vm_page_unlock_queues(); 3591 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 3592} 3593 3594/* 3595 * vfs_bio_set_validclean: 3596 * 3597 * Set the range within the buffer to valid and clean. The range is 3598 * relative to the beginning of the buffer, b_offset. Note that b_offset 3599 * itself may be offset from the beginning of the first page. 3600 * 3601 */ 3602 3603void 3604vfs_bio_set_validclean(struct buf *bp, int base, int size) 3605{ 3606 int i, n; 3607 vm_page_t m; 3608 3609 if (!(bp->b_flags & B_VMIO)) 3610 return; 3611 /* 3612 * Fixup base to be relative to beginning of first page. 3613 * Set initial n to be the maximum number of bytes in the 3614 * first page that can be validated. 3615 */ 3616 3617 base += (bp->b_offset & PAGE_MASK); 3618 n = PAGE_SIZE - (base & PAGE_MASK); 3619 3620 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 3621 vm_page_lock_queues(); 3622 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 3623 m = bp->b_pages[i]; 3624 if (n > size) 3625 n = size; 3626 vm_page_set_validclean(m, base & PAGE_MASK, n); 3627 base += n; 3628 size -= n; 3629 n = PAGE_SIZE; 3630 } 3631 vm_page_unlock_queues(); 3632 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 3633} 3634 3635/* 3636 * vfs_bio_clrbuf: 3637 * 3638 * clear a buffer. This routine essentially fakes an I/O, so we need 3639 * to clear BIO_ERROR and B_INVAL. 3640 * 3641 * Note that while we only theoretically need to clear through b_bcount, 3642 * we go ahead and clear through b_bufsize. 3643 */ 3644 3645void 3646vfs_bio_clrbuf(struct buf *bp) 3647{ 3648 int i, j, mask = 0; 3649 caddr_t sa, ea; 3650 3651 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) { 3652 clrbuf(bp); 3653 return; 3654 } 3655 3656 bp->b_flags &= ~B_INVAL; 3657 bp->b_ioflags &= ~BIO_ERROR; 3658 VM_OBJECT_LOCK(bp->b_bufobj->bo_object); 3659 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 3660 (bp->b_offset & PAGE_MASK) == 0) { 3661 if (bp->b_pages[0] == bogus_page) 3662 goto unlock; 3663 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 3664 VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED); 3665 if ((bp->b_pages[0]->valid & mask) == mask) 3666 goto unlock; 3667 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) && 3668 ((bp->b_pages[0]->valid & mask) == 0)) { 3669 bzero(bp->b_data, bp->b_bufsize); 3670 bp->b_pages[0]->valid |= mask; 3671 goto unlock; 3672 } 3673 } 3674 ea = sa = bp->b_data; 3675 for(i = 0; i < bp->b_npages; i++, sa = ea) { 3676 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 3677 ea = (caddr_t)(vm_offset_t)ulmin( 3678 (u_long)(vm_offset_t)ea, 3679 (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize); 3680 if (bp->b_pages[i] == bogus_page) 3681 continue; 3682 j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE; 3683 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 3684 VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED); 3685 if ((bp->b_pages[i]->valid & mask) == mask) 3686 continue; 3687 if ((bp->b_pages[i]->valid & mask) == 0) { 3688 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) 3689 bzero(sa, ea - sa); 3690 } else { 3691 for (; sa < ea; sa += DEV_BSIZE, j++) { 3692 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) && 3693 (bp->b_pages[i]->valid & (1 << j)) == 0) 3694 bzero(sa, DEV_BSIZE); 3695 } 3696 } 3697 bp->b_pages[i]->valid |= mask; 3698 } 3699unlock: 3700 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); 3701 bp->b_resid = 0; 3702} 3703 3704/* 3705 * vm_hold_load_pages and vm_hold_free_pages get pages into 3706 * a buffers address space. The pages are anonymous and are 3707 * not associated with a file object. 3708 */ 3709static void 3710vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 3711{ 3712 vm_offset_t pg; 3713 vm_page_t p; 3714 int index; 3715 3716 to = round_page(to); 3717 from = round_page(from); 3718 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 3719 3720 VM_OBJECT_LOCK(kernel_object); 3721 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 3722tryagain: 3723 /* 3724 * note: must allocate system pages since blocking here 3725 * could intefere with paging I/O, no matter which 3726 * process we are. 3727 */ 3728 p = vm_page_alloc(kernel_object, 3729 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 3730 VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); 3731 if (!p) { 3732 atomic_add_int(&vm_pageout_deficit, 3733 (to - pg) >> PAGE_SHIFT); 3734 VM_OBJECT_UNLOCK(kernel_object); 3735 VM_WAIT; 3736 VM_OBJECT_LOCK(kernel_object); 3737 goto tryagain; 3738 } 3739 p->valid = VM_PAGE_BITS_ALL; 3740 pmap_qenter(pg, &p, 1); 3741 bp->b_pages[index] = p; 3742 } 3743 VM_OBJECT_UNLOCK(kernel_object); 3744 bp->b_npages = index; 3745} 3746 3747/* Return pages associated with this buf to the vm system */ 3748static void 3749vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 3750{ 3751 vm_offset_t pg; 3752 vm_page_t p; 3753 int index, newnpages; 3754 3755 from = round_page(from); 3756 to = round_page(to); 3757 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 3758 3759 VM_OBJECT_LOCK(kernel_object); 3760 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 3761 p = bp->b_pages[index]; 3762 if (p && (index < bp->b_npages)) { 3763 if (p->busy) { 3764 printf( 3765 "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n", 3766 (intmax_t)bp->b_blkno, 3767 (intmax_t)bp->b_lblkno); 3768 } 3769 bp->b_pages[index] = NULL; 3770 pmap_qremove(pg, 1); 3771 vm_page_lock_queues(); 3772 vm_page_unwire(p, 0); 3773 vm_page_free(p); 3774 vm_page_unlock_queues(); 3775 } 3776 } 3777 VM_OBJECT_UNLOCK(kernel_object); 3778 bp->b_npages = newnpages; 3779} 3780 3781/* 3782 * Map an IO request into kernel virtual address space. 3783 * 3784 * All requests are (re)mapped into kernel VA space. 3785 * Notice that we use b_bufsize for the size of the buffer 3786 * to be mapped. b_bcount might be modified by the driver. 3787 * 3788 * Note that even if the caller determines that the address space should 3789 * be valid, a race or a smaller-file mapped into a larger space may 3790 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST 3791 * check the return value. 3792 */ 3793int 3794vmapbuf(struct buf *bp) 3795{ 3796 caddr_t addr, kva; 3797 vm_prot_t prot; 3798 int pidx, i; 3799 struct vm_page *m; 3800 struct pmap *pmap = &curproc->p_vmspace->vm_pmap; 3801 3802 if (bp->b_bufsize < 0) 3803 return (-1); 3804 prot = VM_PROT_READ; 3805 if (bp->b_iocmd == BIO_READ) 3806 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 3807 for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0; 3808 addr < bp->b_data + bp->b_bufsize; 3809 addr += PAGE_SIZE, pidx++) { 3810 /* 3811 * Do the vm_fault if needed; do the copy-on-write thing 3812 * when reading stuff off device into memory. 3813 * 3814 * NOTE! Must use pmap_extract() because addr may be in 3815 * the userland address space, and kextract is only guarenteed 3816 * to work for the kernland address space (see: sparc64 port). 3817 */ 3818retry: 3819 if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data, 3820 prot) < 0) { 3821 vm_page_lock_queues(); 3822 for (i = 0; i < pidx; ++i) { 3823 vm_page_unhold(bp->b_pages[i]); 3824 bp->b_pages[i] = NULL; 3825 } 3826 vm_page_unlock_queues(); 3827 return(-1); 3828 } 3829 m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot); 3830 if (m == NULL) 3831 goto retry; 3832 bp->b_pages[pidx] = m; 3833 } 3834 if (pidx > btoc(MAXPHYS)) 3835 panic("vmapbuf: mapped more than MAXPHYS"); 3836 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx); 3837 3838 kva = bp->b_saveaddr; 3839 bp->b_npages = pidx; 3840 bp->b_saveaddr = bp->b_data; 3841 bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK); 3842 return(0); 3843} 3844 3845/* 3846 * Free the io map PTEs associated with this IO operation. 3847 * We also invalidate the TLB entries and restore the original b_addr. 3848 */ 3849void 3850vunmapbuf(struct buf *bp) 3851{ 3852 int pidx; 3853 int npages; 3854 3855 npages = bp->b_npages; 3856 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 3857 vm_page_lock_queues(); 3858 for (pidx = 0; pidx < npages; pidx++) 3859 vm_page_unhold(bp->b_pages[pidx]); 3860 vm_page_unlock_queues(); 3861 3862 bp->b_data = bp->b_saveaddr; 3863} 3864 3865void 3866bdone(struct buf *bp) 3867{ 3868 struct mtx *mtxp; 3869 3870 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3871 mtx_lock(mtxp); 3872 bp->b_flags |= B_DONE; 3873 wakeup(bp); 3874 mtx_unlock(mtxp); 3875} 3876 3877void 3878bwait(struct buf *bp, u_char pri, const char *wchan) 3879{ 3880 struct mtx *mtxp; 3881 3882 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3883 mtx_lock(mtxp); 3884 while ((bp->b_flags & B_DONE) == 0) 3885 msleep(bp, mtxp, pri, wchan, 0); 3886 mtx_unlock(mtxp); 3887} 3888 3889int 3890bufsync(struct bufobj *bo, int waitfor) 3891{ 3892 3893 return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread)); 3894} 3895 3896void 3897bufstrategy(struct bufobj *bo, struct buf *bp) 3898{ 3899 int i = 0; 3900 struct vnode *vp; 3901 3902 vp = bp->b_vp; 3903 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy")); 3904 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, 3905 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); 3906 i = VOP_STRATEGY(vp, bp); 3907 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp)); 3908} 3909 3910void 3911bufobj_wrefl(struct bufobj *bo) 3912{ 3913 3914 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 3915 ASSERT_BO_LOCKED(bo); 3916 bo->bo_numoutput++; 3917} 3918 3919void 3920bufobj_wref(struct bufobj *bo) 3921{ 3922 3923 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 3924 BO_LOCK(bo); 3925 bo->bo_numoutput++; 3926 BO_UNLOCK(bo); 3927} 3928 3929void 3930bufobj_wdrop(struct bufobj *bo) 3931{ 3932 3933 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop")); 3934 BO_LOCK(bo); 3935 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count")); 3936 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) { 3937 bo->bo_flag &= ~BO_WWAIT; 3938 wakeup(&bo->bo_numoutput); 3939 } 3940 BO_UNLOCK(bo); 3941} 3942 3943int 3944bufobj_wwait(struct bufobj *bo, int slpflag, int timeo) 3945{ 3946 int error; 3947 3948 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait")); 3949 ASSERT_BO_LOCKED(bo); 3950 error = 0; 3951 while (bo->bo_numoutput) { 3952 bo->bo_flag |= BO_WWAIT; 3953 error = msleep(&bo->bo_numoutput, BO_MTX(bo), 3954 slpflag | (PRIBIO + 1), "bo_wwait", timeo); 3955 if (error) 3956 break; 3957 } 3958 return (error); 3959} 3960 3961void 3962bpin(struct buf *bp) 3963{ 3964 struct mtx *mtxp; 3965 3966 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3967 mtx_lock(mtxp); 3968 bp->b_pin_count++; 3969 mtx_unlock(mtxp); 3970} 3971 3972void 3973bunpin(struct buf *bp) 3974{ 3975 struct mtx *mtxp; 3976 3977 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3978 mtx_lock(mtxp); 3979 if (--bp->b_pin_count == 0) 3980 wakeup(bp); 3981 mtx_unlock(mtxp); 3982} 3983 3984void 3985bunpin_wait(struct buf *bp) 3986{ 3987 struct mtx *mtxp; 3988 3989 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3990 mtx_lock(mtxp); 3991 while (bp->b_pin_count > 0) 3992 msleep(bp, mtxp, PRIBIO, "bwunpin", 0); 3993 mtx_unlock(mtxp); 3994} 3995 3996#include "opt_ddb.h" 3997#ifdef DDB 3998#include <ddb/ddb.h> 3999 4000/* DDB command to show buffer data */ 4001DB_SHOW_COMMAND(buffer, db_show_buffer) 4002{ 4003 /* get args */ 4004 struct buf *bp = (struct buf *)addr; 4005 4006 if (!have_addr) { 4007 db_printf("usage: show buffer <addr>\n"); 4008 return; 4009 } 4010 4011 db_printf("buf at %p\n", bp); 4012 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 4013 db_printf( 4014 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n" 4015 "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_dep = %p\n", 4016 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4017 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno, 4018 bp->b_dep.lh_first); 4019 if (bp->b_npages) { 4020 int i; 4021 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 4022 for (i = 0; i < bp->b_npages; i++) { 4023 vm_page_t m; 4024 m = bp->b_pages[i]; 4025 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4026 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4027 if ((i + 1) < bp->b_npages) 4028 db_printf(","); 4029 } 4030 db_printf("\n"); 4031 } 4032 db_printf(" "); 4033 lockmgr_printinfo(&bp->b_lock); 4034} 4035 4036DB_SHOW_COMMAND(lockedbufs, lockedbufs) 4037{ 4038 struct buf *bp; 4039 int i; 4040 4041 for (i = 0; i < nbuf; i++) { 4042 bp = &buf[i]; 4043 if (BUF_ISLOCKED(bp)) { 4044 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4045 db_printf("\n"); 4046 } 4047 } 4048} 4049 4050DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs) 4051{ 4052 struct vnode *vp; 4053 struct buf *bp; 4054 4055 if (!have_addr) { 4056 db_printf("usage: show vnodebufs <addr>\n"); 4057 return; 4058 } 4059 vp = (struct vnode *)addr; 4060 db_printf("Clean buffers:\n"); 4061 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { 4062 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4063 db_printf("\n"); 4064 } 4065 db_printf("Dirty buffers:\n"); 4066 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { 4067 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4068 db_printf("\n"); 4069 } 4070} 4071#endif /* DDB */ 4072