/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: head/sys/kern/vfs_bio.c 65770 2000-09-12 09:49:08Z bp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/mutex.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;
struct simplelock buftimelock;	/* Interlock on setting prio and timo */

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
		int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void vfs_backgroundwritedone(struct buf *bp);
static int flushbufqueues(void);

static int bd_request;

static void buf_daemon __P((void));
/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
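 *
 * (Note: bogus_page is mapped in place of pages that are already fully
 * valid, so that device I/O cannot clobber their contents; brelse()
 * below looks the real pages back up with vm_page_lookup() and
 * reinstalls them once the I/O has completed.)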
 */
vm_page_t bogus_page;
int runningbufspace;
int vmiodirenable = FALSE;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace,
	bufmallocspace, maxbufmallocspace, lobufspace, hibufspace;
static int bufreusecnt, bufdefragcnt, buffreekvacnt;
static int maxbdrun;
static int needsbuffer;
static int numdirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int getnewbufcalls;
static int getnewbufrestarts;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
	&runningbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
	&hibufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD,
	&lobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbdrun, CTLFLAG_RW,
	&maxbdrun, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW,
	&getnewbufcalls, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW,
	&getnewbufrestarts, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW,
	&vmiodirenable, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW,
	&bufdefragcnt, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW,
	&buffreekvacnt, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW,
	&bufreusecnt, 0, "");

static int bufhashmask;
static LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
char *buf_wmesg = BUF_WMESG;

extern int vm_swap_size;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

/*
 * Buffer hash table code.  Note that the logical block scans linearly, which
 * gives us some L1 cache locality.
 */

static __inline
struct bufhashhdr *
bufhash(struct vnode *vnp, daddr_t bn)
{
	return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]);
}

/*
 *	numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */

static __inline void
numdirtywakeup(void)
{
	if (numdirtybuffers < hidirtybuffers) {
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);
		}
	}
}

/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.
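 *	(The flag in question is VFS_BIO_NEED_BUFSPACE, defined above.)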
 *	Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{
	++numfreebuffers;
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
}

/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

static __inline__
void
bd_wakeup(int dirtybuflevel)
{
	if (numdirtybuffers >= dirtybuflevel && bd_request == 0) {
		bd_request = 1;
		wakeup(&bd_request);
	}
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */

static __inline__
void
bd_speedup(void)
{
	bd_wakeup(1);
}

/*
 * Initialize buffer headers and related structures.
 */

caddr_t
bufhashinit(caddr_t vaddr)
{
	/* first, make a null hash table */
	for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
		;
	bufhashtbl = (void *)vaddr;
	vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask;
	--bufhashmask;
	return(vaddr);
}

void
bufinit(void)
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);
	simple_lock_init(&buftimelock);

	for (i = 0; i <= bufhashmask; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
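	 *
	 * (With the assignments below, the reserve left for buf_daemon,
	 * maxbufspace - hibufspace, works out to the smaller of
	 * maxbufspace / 4 and MAXBSIZE * 10.)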
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = nbuf * BKVASIZE;
	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;

/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	hidirtybuffers = nbuf / 4 + 20;
	numdirtybuffers = 0;
/*
 * To support extreme low-memory systems, make sure hidirtybuffers cannot
 * eat up all available buffer space.  This occurs when our minimum cannot
 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
 * BKVASIZE'd (8K) buffers.
 */
	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}

/*
 * Try to keep the number of free buffers in the specified range,
 * and give special processes (e.g. like buf_daemon) access to an
 * emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

/*
 * Maximum number of async ops initiated per buf_daemon loop.  This is
 * somewhat of a hack at the moment, we really need to limit ourselves
 * based on the number of bytes of I/O in-transit that were initiated
 * from buf_daemon.
 */
	if ((maxbdrun = nswbuf / 4) < 4)
		maxbdrun = 4;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
	cnt.v_wire_count++;

}

/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Must be called at splbio() or higher as this is the only locking for
 *	buffer_map.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize) {
		++buffreekvacnt;
		bufspace -= bp->b_kvasize;
		vm_map_delete(buffer_map,
		    (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
		);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();
	int old_qindex = bp->b_qindex;

	if (bp->b_qindex != QUEUE_NONE) {
		KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
		runningbufspace += bp->b_bufsize;
	} else {
		if (BUF_REFCNT(bp) <= 1)
			panic("bremfree: removing a buffer not on a queue");
	}

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
	 * the buffer was free and we must decrement numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		switch(old_qindex) {
		case QUEUE_DIRTY:
		case QUEUE_CLEAN:
		case QUEUE_EMPTY:
		case QUEUE_EMPTYKVA:
			--numfreebuffers;
			break;
		default:
			break;
		}
	}
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != idleproc)
			curproc->p_stats->p_ru.ru_inblock++;
		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (bufwait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
 * to initiating I/O.  If B_CACHE is set, the buffer is valid
 * and we do not have to do anything.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != idleproc)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != idleproc)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
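 *
 * (Note: as the code below shows, a synchronous bwrite() returns the
 * bufwait() result after brelse()ing the buffer, while an asynchronous
 * bwrite() returns 0 as soon as the I/O has been initiated.)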
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct buf *newbp;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

	if (BUF_REFCNT(bp) == 0)
		panic("bwrite: buffer is not busy???");
	s = splbio();
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	if (bp->b_xflags & BX_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			splx(s);
			bdwrite(bp);
			return (0);
		}
		bp->b_xflags |= BX_BKGRDWAIT;
		tsleep(&bp->b_xflags, PRIBIO, "biord", 0);
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("bwrite: still writing");
	}

	/* Mark the buffer clean */
	bundirty(bp);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 */
	if ((bp->b_xflags & BX_BKGRDWRITE) && (bp->b_flags & B_ASYNC)) {
		if (bp->b_iodone != NULL) {
			printf("bp->b_iodone = %p\n", bp->b_iodone);
			panic("bwrite: need chained iodone");
		}

		/* get a new block */
		newbp = geteblk(bp->b_bufsize);

		/* set it to be identical to the old block */
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		bgetvp(bp->b_vp, newbp);
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = vfs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;

		/* move over the dependencies */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_movedeps(bp, newbp);

		/*
		 * Initiate write on the copy, release the original to
		 * the B_LOCKED queue so that it cannot go away until
		 * the background write completes. If not locked it could go
		 * away and then be reconstituted while it was being written.
		 * If the reconstituted buffer were written, we could end up
		 * with two background copies being written at the same time.
		 */
		bp->b_xflags |= BX_BKGRDINPROG;
		bp->b_flags |= B_LOCKED;
		bqrelse(bp);
		bp = newbp;
	}

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_WRITEINPROG | B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != idleproc)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	}

	return (0);
}

/*
 * Complete a background write started from bwrite.
 */
static void
vfs_backgroundwritedone(bp)
	struct buf *bp;
{
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_movedeps(bp, origbp);
	/*
	 * Clear the BX_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 */
	origbp->b_xflags &= ~BX_BKGRDINPROG;
	if (origbp->b_xflags & BX_BKGRDWAIT) {
		origbp->b_xflags &= ~BX_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	/*
	 * Clear the B_LOCKED flag and remove it from the locked
	 * queue if it currently resides there.
	 */
	origbp->b_flags &= ~B_LOCKED;
	if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
		bremfree(origbp);
		bqrelse(origbp);
	}
	/*
	 * This buffer is marked B_NOCACHE, so when it is released
	 * by biodone, it will be tossed. We mark it with BIO_READ
	 * to avoid biodone doing a second vwakeup.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_iocmd = BIO_READ;
	bp->b_flags &= ~(B_CACHE | B_DONE);
	bp->b_iodone = 0;
	bufdone(bp);
}

/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf * bp)
{
	if (BUF_REFCNT(bp) == 0)
		panic("bdwrite: buffer is not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * Wakeup the buffer flushing daemon if we have saturated the
	 * buffer cache.
	 */

	bd_wakeup(hidirtybuffers);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
		bd_wakeup(hidirtybuffers);
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		--numdirtybuffers;
		numdirtywakeup();
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) BUF_WRITE(bp);
}

/*
 *	bowrite:
 *
 *	Ordered write.  Start output on a buffer, and flag it so that the
 *	device will write it in the order it was queued.  The buffer is
 *	released when the output completes.  bwrite() ( or the VOP routine
 *	anyway ) is responsible for handling B_INVAL buffers.
 */
int
bowrite(struct buf * bp)
{
	bp->b_ioflags |= BIO_ORDERED;
	bp->b_flags |= B_ASYNC;
	return (BUF_WRITE(bp));
}

/*
 *	bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */

void
bwillwrite(void)
{
	int slop = hidirtybuffers / 10;

	if (numdirtybuffers > hidirtybuffers + slop) {
		int s;

		s = splbio();
		while (numdirtybuffers > hidirtybuffers) {
			bd_wakeup(hidirtybuffers);
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
		}
		splx(s);
	}
}

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf * bp)
{
	int s;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	s = splbio();

	if (bp->b_flags & B_LOCKED)
		bp->b_ioflags &= ~BIO_ERROR;

	if (bp->b_iocmd == BIO_WRITE &&
	    (bp->b_ioflags & BIO_ERROR) &&
	    !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If B_INVAL is set then
		 * this case is not run and the next case is run to
		 * destroy the buffer.
		 * B_INVAL can occur if the buffer
		 * is outside the range supported by the underlying device.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) ||
	    bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			numdirtywakeup();
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */

	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.  If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 !vn_isdisk(bp->b_vp, NULL) &&
		 (bp->b_flags & B_DELWRI) &&
		 (bp->b_xflags & BX_BKGRDINPROG))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * for block sizes that are less than PAGE_SIZE, the b_data
		 * base of the buffer does not represent exactly b_offset and
		 * neither b_offset nor b_size are necessarily page aligned.
		 * Instead, the starting position of b_offset is:
		 *
		 * 	b_data + (b_offset & PAGE_MASK)
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				VOP_GETVOBJECT(vp, &obj);
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
						if (!m) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}

	/* enqueue */

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
		} else {
			bp->b_qindex = QUEUE_EMPTY;
		}
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || (bp->b_ioflags & BIO_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* remaining buffers */
	} else {
		switch(bp->b_flags & (B_DELWRI|B_AGE)) {
		case B_DELWRI | B_AGE:
			bp->b_qindex = QUEUE_DIRTY;
			TAILQ_INSERT_HEAD(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
			break;
		case B_DELWRI:
			bp->b_qindex = QUEUE_DIRTY;
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
			break;
		case B_AGE:
			bp->b_qindex = QUEUE_CLEAN;
			TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
			break;
		default:
			bp->b_qindex = QUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
			break;
		}
	}

	/*
	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
	 * on the correct queue.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
		bp->b_flags &= ~B_DELWRI;
		--numdirtybuffers;
		numdirtywakeup();
	}

	runningbufspace -= bp->b_bufsize;

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free.
	 */

	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	bp->b_ioflags &= ~BIO_ORDERED;
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	splx(s);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		bp->b_ioflags &= ~BIO_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_DELWRI) {
		bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
	} else {
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	}

	runningbufspace -= bp->b_bufsize;

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe wakeup
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	bp->b_ioflags &= ~BIO_ORDERED;
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i, s;
	vm_page_t m;

	s = splvm();
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	runningbufspace -= bp->b_bufsize;
	splx(s);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	if (bp->b_bufsize)
		bufspacewakeup();
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = bufhash(vp, blkno);

	/* Search hash chain */
	LIST_FOREACH(bp, bh, b_hash) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
	}
	return (bp);
}

/*
 *	vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	int j;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
			if ((bpa = gbincore(vp, lblkno - j)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		--j;
		ncl = i + j;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
			splx(s);
			return nwritten;
		}
	}

	BUF_LOCK(bp, LK_EXCLUSIVE);
	bremfree(bp);
	bp->b_flags |= B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) BUF_WRITE(bp);

	return nwritten;
}

/*
 *	getnewbuf:
 *
 *	Find and initialize a new buffer header, freeing up existing buffers
 *	in the bufqueues as necessary.
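 *	(Note: kva for the returned buffer is reserved in BKVASIZE chunks,
 *	as done at the end of this function, to keep fragmentation sane.)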
 *	The new buffer is returned locked.
 *
 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
 *	buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 *	We block if:
 *		We have insufficient buffer headers
 *		We have insufficient buffer space
 *		buffer_map is too fragmented ( space reservation fails )
 *		If we have to flush dirty buffers ( but we try to avoid this )
 *
 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 *	Instead we ask the buf daemon to do it for us.  We attempt to
 *	avoid piecemeal wakeups of the pageout daemon.
 */

static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	struct buf *nbp;
	int defrag = 0;
	int nqindex;
	int isspecial;
	static int flushingbufs;

	if (curproc != idleproc &&
	    (curproc->p_flag & (P_COWINPROGRESS|P_BUFEXHAUST)) == 0)
		isspecial = 0;
	else
		isspecial = 1;

	++getnewbufcalls;
	--getnewbufrestarts;
restart:
	++getnewbufrestarts;

	/*
	 * Setup for scan.  If we do not have enough free buffers,
	 * we setup a degenerate case that immediately fails.  Note
	 * that if we are a specially marked process, we are allowed to
	 * dip into our reserves.
	 *
	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
	 *
	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
	 * However, there are a number of cases (defragging, reusing, ...)
	 * where we cannot backup.
	 */

	if (isspecial == 0 && numfreebuffers < lofreebuffers) {
		/*
		 * This will cause an immediate failure
		 */
		nqindex = QUEUE_CLEAN;
		nbp = NULL;
	} else {
		/*
		 * Locate a buffer which already has KVA assigned.  First
		 * try EMPTYKVA buffers.
		 */
		nqindex = QUEUE_EMPTYKVA;
		nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);

		if (nbp == NULL) {
			/*
			 * If no EMPTYKVA buffers and we are either
			 * defragging or reusing, locate a CLEAN buffer
			 * to free or reuse.  If bufspace usage is low
			 * skip this step so we can allocate a new buffer.
			 */
			if (defrag || bufspace >= lobufspace) {
				nqindex = QUEUE_CLEAN;
				nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
			}

			/*
			 * Nada.  If we are allowed to allocate an EMPTY
			 * buffer, go get one.
			 */
			if (nbp == NULL && defrag == 0 &&
			    (isspecial || bufspace < hibufspace)) {
				nqindex = QUEUE_EMPTY;
				nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
			}
		}
	}

	/*
	 * Run scan, possibly freeing data and/or kva mappings on the fly
	 * depending.
	 */

	while ((bp = nbp) != NULL) {
		int qindex = nqindex;

		/*
		 * Calculate next bp ( we can only use it if we do not block
		 * or do other fancy things ).
		 */
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
			switch(qindex) {
			case QUEUE_EMPTY:
				nqindex = QUEUE_EMPTYKVA;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
					break;
				/* fall through */
			case QUEUE_EMPTYKVA:
				nqindex = QUEUE_CLEAN;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
					break;
				/* fall through */
			case QUEUE_CLEAN:
				/*
				 * nbp is NULL.
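				 * There is no further queue to fall
				 * back to; the scan terminates once
				 * this bp has been processed.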
				 */
				break;
			}
		}

		/*
		 * Sanity Checks
		 */
		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));

		/*
		 * Note: we no longer distinguish between VMIO and non-VMIO
		 * buffers.
		 */

		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));

		/*
		 * If we are defragging then we need a buffer with
		 * b_kvasize != 0.  XXX this situation should no longer
		 * occur, if defrag is non-zero the buffer's b_kvasize
		 * should also be non-zero at this point.  XXX
		 */
		if (defrag && bp->b_kvasize == 0) {
			printf("Warning: defrag empty buffer %p\n", bp);
			continue;
		}

		/*
		 * Start freeing the bp.  This is somewhat involved.  nbp
		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			panic("getnewbuf: locked buf");
		bremfree(bp);

		if (qindex == QUEUE_CLEAN) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);
			}
			if (bp->b_vp)
				brelvp(bp);
		}

		/*
		 * NOTE:  nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */

		if (bp->b_rcred != NOCRED) {
			crfree(bp->b_rcred);
			bp->b_rcred = NOCRED;
		}
		if (bp->b_wcred != NOCRED) {
			crfree(bp->b_wcred);
			bp->b_wcred = NOCRED;
		}
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 3");
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);

		if (bp->b_bufsize)
			allocbuf(bp, 0);

		bp->b_flags = 0;
		bp->b_ioflags = 0;
		bp->b_xflags = 0;
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_offset = NOOFFSET;
		bp->b_iodone = 0;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		bp->b_npages = 0;
		bp->b_dirtyoff = bp->b_dirtyend = 0;

		LIST_INIT(&bp->b_dep);

		/*
		 * If we are defragging then free the buffer.
		 */
		if (defrag) {
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			defrag = 0;
			goto restart;
		}

		/*
		 * If we are a normal process then deal with bufspace
		 * hysteresis.  A normal process tries to keep bufspace
		 * between lobufspace and hibufspace.  Note: if we encounter
		 * a buffer with b_kvasize == 0 then it means we started
		 * our scan on the EMPTY list and should allocate a new
		 * buffer.
		 */
		if (isspecial == 0) {
			if (bufspace > hibufspace)
				flushingbufs = 1;
			if (flushingbufs && bp->b_kvasize != 0) {
				bp->b_flags |= B_INVAL;
				bfreekva(bp);
				brelse(bp);
				goto restart;
			}
			if (bufspace < lobufspace)
				flushingbufs = 0;
		}
		break;
	}

	/*
	 * If we exhausted our list, sleep as appropriate.  We may have to
	 * wakeup various daemons and write out some dirty buffers.
	 *
	 * Generally we are sleeping due to insufficient buffer space.
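	 * (The tsleep message below records the reason: "nbufkv" when
	 * defragging, "nbufbs" when bufspace has reached hibufspace,
	 * and "newbuf" otherwise.)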
	 */

	if (bp == NULL) {
		int flags;
		char *waitmsg;

		if (defrag) {
			flags = VFS_BIO_NEED_BUFSPACE;
			waitmsg = "nbufkv";
		} else if (bufspace >= hibufspace) {
			waitmsg = "nbufbs";
			flags = VFS_BIO_NEED_BUFSPACE;
		} else {
			waitmsg = "newbuf";
			flags = VFS_BIO_NEED_ANY;
		}

		bd_speedup();	/* heeeelp */

		needsbuffer |= flags;
		while (needsbuffer & flags) {
			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
			    waitmsg, slptimeo))
				return (NULL);
		}
	} else {
		/*
		 * We finally have a valid bp.  We aren't quite out of the
		 * woods, we still have to reserve kva space.  In order
		 * to keep fragmentation sane we only allocate kva in
		 * BKVASIZE chunks.
		 */
		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;

		if (maxsize != bp->b_kvasize) {
			vm_offset_t addr = 0;

			bfreekva(bp);

			if (vm_map_findspace(buffer_map,
				vm_map_min(buffer_map), maxsize, &addr)) {
				/*
				 * Uh oh.  Buffer map is too fragmented.  We
				 * must defragment the map.
				 */
				++bufdefragcnt;
				defrag = 1;
				bp->b_flags |= B_INVAL;
				brelse(bp);
				goto restart;
			}
			if (addr) {
				vm_map_insert(buffer_map, NULL, 0,
					addr, addr + maxsize,
					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

				bp->b_kvabase = (caddr_t) addr;
				bp->b_kvasize = maxsize;
				bufspace += bp->b_kvasize;
				++bufreusecnt;
			}
		}
		bp->b_data = bp->b_kvabase;
	}
	return(bp);
}

/*
 *	waitfreebuffers:
 *
 *	Wait for sufficient free buffers.  Only called from normal processes.
 */

static void
waitfreebuffers(int slpflag, int slptimeo)
{
	while (numfreebuffers < hifreebuffers) {
		if (numfreebuffers >= hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
			break;
	}
}

/*
 *	buf_daemon:
 *
 *	buffer flushing daemon.  Buffers are normally flushed by the
 *	update daemon but if it cannot keep up this process starts to
 *	take the load in an attempt to prevent getnewbuf() from blocking.
 */

static struct proc *bufdaemonproc;
static int bd_interval;
static int bd_flushto;
static int bd_flushinc;

static struct kproc_desc buf_kp = {
	"bufdaemon",
	buf_daemon,
	&bufdaemonproc
};
SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)

static void
buf_daemon()
{
	int s;

	mtx_enter(&Giant, MTX_DEF);

	/*
	 * This process needs to be suspended prior to shutdown sync.
	 */
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, bufdaemonproc,
	    SHUTDOWN_PRI_LAST);

	/*
	 * This process is allowed to take the buffer cache to the limit
	 */
	curproc->p_flag |= P_BUFEXHAUST;
	s = splbio();

	bd_interval = 5 * hz;	/* dynamically adjusted */
	bd_flushto = hidirtybuffers;	/* dynamically adjusted */
	bd_flushinc = 1;

	for (;;) {
		kproc_suspend_loop(bufdaemonproc);

		bd_request = 0;

		/*
		 * Do the flush.  Limit the number of buffers we flush in one
		 * go.  The failure condition occurs when processes are writing
		 * buffers faster than we can dispose of them.
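		 * (The per-iteration limit is maxbdrun, set in bufinit()
		 * and exported as the vfs.maxbdrun sysctl.)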
		 * In this case
		 * we may be flushing so often that the previous set of flushes
		 * have not had time to complete, causing us to run out of
		 * physical buffers and block.
		 */
		{
			int runcount = maxbdrun;

			while (numdirtybuffers > bd_flushto && runcount) {
				--runcount;
				if (flushbufqueues() == 0)
					break;
			}
		}

		if (bd_request ||
		    tsleep(&bd_request, PVM, "psleep", bd_interval) == 0) {
			/*
			 * Another request is pending or we were woken up
			 * without timing out.  Flush more.
			 */
			--bd_flushto;
			if (bd_flushto >= numdirtybuffers - 5) {
				bd_flushto = numdirtybuffers - 10;
				bd_flushinc = 1;
			}
			if (bd_flushto < 2)
				bd_flushto = 2;
		} else {
			/*
			 * We slept and timed out, we can slow down.
			 */
			bd_flushto += bd_flushinc;
			if (bd_flushto > hidirtybuffers)
				bd_flushto = hidirtybuffers;
			++bd_flushinc;
			if (bd_flushinc > hidirtybuffers / 20 + 1)
				bd_flushinc = hidirtybuffers / 20 + 1;
		}

		/*
		 * Set the interval on a linear scale based on hidirtybuffers
		 * with a maximum frequency of 1/10 second.
		 */
		bd_interval = bd_flushto * 5 * hz / hidirtybuffers;
		if (bd_interval < hz / 10)
			bd_interval = hz / 10;
	}
}

/*
 *	flushbufqueues:
 *
 *	Try to flush a buffer in the dirty queue.  We must be careful to
 *	free up B_INVAL buffers instead of writing them, which NFS is
 *	particularly sensitive to.
 */

static int
flushbufqueues(void)
{
	struct buf *bp;
	int r = 0;

	bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);

	while (bp) {
		KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp));
		if ((bp->b_flags & B_DELWRI) != 0 &&
		    (bp->b_xflags & BX_BKGRDINPROG) == 0) {
			if (bp->b_flags & B_INVAL) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
					panic("flushbufqueues: locked buf");
				bremfree(bp);
				brelse(bp);
				++r;
				break;
			}
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    (bp->b_flags & B_DEFERRED) == 0 &&
			    buf_countdeps(bp, 0)) {
				TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY],
				    bp, b_freelist);
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY],
				    bp, b_freelist);
				bp->b_flags |= B_DEFERRED;
				bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
				continue;
			}
			vfs_bio_awrite(bp);
			++r;
			break;
		}
		bp = TAILQ_NEXT(bp, b_freelist);
	}
	return (r);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
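 * (Unlike incore(), this can return true for a block whose pages are
 * resident and valid in the VM object even though no buffer header
 * currently maps them.)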
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc, size;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	size = PAGE_SIZE;
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		tinc = size;
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 *	vfs_setdirty:
 *
 *	Sets the dirty range for a buffer based on the status of the dirty
 *	bits in the pages comprising the buffer.
 *
 *	The range is limited to the size of the buffer.
 *
 *	This routine is primarily used by NFS, but is generalized for the
 *	B_VMIO case.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;

	/*
	 * Degenerate case - empty buffer
	 */

	if (bp->b_bufsize == 0)
		return;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	object = bp->b_pages[0]->object;

	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p writeable but not mightbedirty\n", object);
	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p mightbedirty but not writeable\n", object);

	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
		vm_offset_t boffset;
		vm_offset_t eoffset;

		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_pages[i]);
		}

		/*
		 * Calculate the encompassing dirty range, boffset and eoffset,
		 * (eoffset - boffset) bytes.
		 */

		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty)
				break;
		}
		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		/*
		 * Fit it to the buffer.
		 */

		if (eoffset > bp->b_bcount)
			eoffset = bp->b_bcount;

		/*
		 * If we have a good dirty range, merge with the existing
		 * dirty range.
		 */

		if (boffset < eoffset) {
			if (bp->b_dirtyoff > boffset)
				bp->b_dirtyoff = boffset;
			if (bp->b_dirtyend < eoffset)
				bp->b_dirtyend = eoffset;
		}
	}
}

/*
 *	getblk:
 *
 *	Get a block given a specified block and offset into a file/device.
 *	The buffer's B_DONE bit will be cleared on return, making it almost
 *	ready for an I/O initiation.  B_INVAL may or may not be set on
 *	return.  The caller should clear B_INVAL prior to initiating a
 *	READ.
 *
 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 *	an existing buffer.
 *
 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 *	and then cleared based on the backing VM.  If the previous buffer is
 *	non-0-sized but invalid, B_CACHE will be cleared.
 *
 *	If getblk() must create a new buffer, the new buffer is returned with
 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 *	case it is returned with B_INVAL clear and B_CACHE set based on the
 *	backing VM.
 *
 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
 *	B_CACHE bit is clear.
 *
 *	What this means, basically, is that the caller should use B_CACHE to
 *	determine whether the buffer is fully valid or not and should clear
 *	B_INVAL prior to issuing a read.  If the caller intends to validate
 *	the buffer by loading its data area with something, the caller needs
 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
 *	the caller should set B_CACHE ( as an optimization ), else the caller
 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
 *	a write attempt or if it was a successful read.  If the caller
 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

	s = splbio();
loop:
	/*
	 * Block if we are low on buffers.  Certain processes are allowed
	 * to completely exhaust the buffer cache.
	 *
	 * If this check ever becomes a bottleneck it may be better to
	 * move it into the else, when gbincore() fails.  At the moment
	 * it isn't a problem.
	 */
	if (curproc == idleproc || (curproc->p_flag & P_BUFEXHAUST)) {
		if (numfreebuffers == 0) {
			if (curproc == idleproc)
				return NULL;
			needsbuffer |= VFS_BIO_NEED_ANY;
			tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
			    slptimeo);
		}
	} else if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		/*
		 * Buffer is in-core.  If the buffer is not busy, it must
		 * be on a queue.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "getblk", slpflag, slptimeo) == ENOLCK)
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}

		/*
		 * The buffer is locked.  B_CACHE is cleared if the buffer is
		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
		 * and for a VMIO buffer B_CACHE is adjusted according to the
		 * backing VM cache.
		 */
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
			bp->b_flags |= B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies for non-VMIO case.
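		 *
		 * (Illustrative note, not in the original: if a 2K B_DELWRI
		 * buffer is re-requested at 8K it cannot simply be discarded
		 * or the delayed write would be lost, which is why the code
		 * below writes it out with B_NOCACHE set before looping back
		 * to re-acquire it at the new size.)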
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)) {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					BUF_WRITE(bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						BUF_WRITE(bp);
					}
				}
				goto loop;
			}
		}

		/*
		 * If the size is inconsistent in the VMIO case, we can resize
		 * the buffer.  This might lead to B_CACHE getting set or
		 * cleared.  If the size has not changed, B_CACHE remains
		 * unchanged from its previous state.
		 */

		if (bp->b_bcount != size)
			allocbuf(bp, size);

		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));

		/*
		 * A buffer with B_DELWRI set and B_CACHE clear must
		 * be committed before we can return the buffer in
		 * order to prevent the caller from issuing a read
		 * ( due to B_CACHE not being set ) and overwriting
		 * it.
		 *
		 * Most callers, including NFS and FFS, need this to
		 * operate properly either because they assume they
		 * can issue a read if B_CACHE is not set, or because
		 * ( for example ) an uncached B_DELWRI might loop due
		 * to softupdates re-dirtying the buffer.  In the latter
		 * case, B_CACHE is set after the first write completes,
		 * preventing further loops.
		 */

		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			BUF_WRITE(bp);
			goto loop;
		}

		splx(s);
		bp->b_flags &= ~B_DONE;
	} else {
		/*
		 * Buffer is not in-core, create new buffer.  The buffer
		 * returned by getnewbuf() is locked.  Note that the returned
		 * buffer is also considered valid (not marked B_INVAL).
		 */
		int bsize, maxsize, vmio;
		off_t offset;

		if (vn_isdisk(vp, NULL))
			bsize = DEV_BSIZE;
		else if (vp->v_mountedhere)
			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
		else if (vp->v_mount)
			bsize = vp->v_mount->mnt_stat.f_iosize;
		else
			bsize = size;

		offset = (off_t)blkno * bsize;
		vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
		maxsize = imax(maxsize, bsize);

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * This can be a problem whether the vnode is locked or not.
		 * If the buffer is created out from under us, we have to
		 * throw away the one we just created.  There is no window
		 * race because we are safely running at splbio() from the
		 * point of the duplicate buffer creation through to here,
		 * and we've locked the buffer.
		 */
		if (gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = bufhash(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		/*
		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
		 * buffer size starts out as 0, B_CACHE will be set by
		 * allocbuf() for the VMIO case prior to it testing the
		 * backing store for validity.
		 */

		if (vmio) {
			bp->b_flags |= B_VMIO;
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		splx(s);
		bp->b_flags &= ~B_DONE;
	}
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.  The buffer is initially
 * set to B_INVAL.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;
	int maxsize;

	maxsize = (size + BKVAMASK) & ~BKVAMASK;

	s = splbio();
	while ((bp = getnewbuf(0, 0, size, maxsize)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).  This code is able to
 * resize a buffer up or down.
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
 * the caller.  Calling this code willy nilly can result in the loss of data.
 *
 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 * B_CACHE for the non-VMIO case.
 */

int
allocbuf(struct buf *bp, int size)
{
	int newbsize, mbsize;
	int i;

	if (BUF_REFCNT(bp) == 0)
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel.  Don't
		 * mess with B_CACHE.
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufmallocspace -= bp->b_bufsize;
					runningbufspace -= bp->b_bufsize;
					if (bp->b_bufsize)
						bufspacewakeup();
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
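			 *
			 * (Illustrative example, not in the original comment:
			 * with PAGE_SIZE == 4096, a fresh 512 byte buffer is
			 * malloced here because 512 <= PAGE_SIZE/2; if it is
			 * later grown, the B_MALLOC branch below copies the
			 * data out and reverts it to page-backed memory.)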
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufmallocspace += mbsize;
				runningbufspace += bp->b_bufsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufmallocspace -= bp->b_bufsize;
				runningbufspace -= bp->b_bufsize;
				if (bp->b_bufsize)
					bufspacewakeup();
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (size == 0) ? 0 :
		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif
		/*
		 * Set B_CACHE initially if buffer is 0 length or will become
		 * 0-length.
		 */
		if (size == 0 || bp->b_bufsize == 0)
			bp->b_flags |= B_CACHE;

		if (newbsize < bp->b_bufsize) {
			/*
			 * DEV_BSIZE aligned new buffer size is less than the
			 * DEV_BSIZE aligned existing buffer size.  Figure out
			 * if we have to remove any pages.
			 */
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of
					 * vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					KASSERT(m != bogus_page,
					    ("allocbuf: bogus page found"));
					while (vm_page_sleep_busy(m, TRUE, "biodep"))
						;

					bp->b_pages[i] = NULL;
					vm_page_unwire(m, 0);
				}
				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (size > bp->b_bcount) {
			/*
			 * We are growing the buffer, possibly in a
			 * byte-granular fashion.
			 */
			struct vnode *vp;
			vm_object_t obj;
			vm_offset_t toff;
			vm_offset_t tinc;

			/*
			 * Step 1, bring in the VM pages from the object,
			 * allocating them if necessary.  We must clear
			 * B_CACHE if these pages are not valid for the
			 * range covered by the buffer.
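			 *
			 * (Illustrative note, not in the original: a page
			 * freshly allocated by vm_page_alloc() below contains
			 * no valid data, so B_CACHE is cleared to force the
			 * caller to perform the read.)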
			 */

			vp = bp->b_vp;
			VOP_GETVOBJECT(vp, &obj);

			while (bp->b_npages < desiredpages) {
				vm_page_t m;
				vm_pindex_t pi;

				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
				if ((m = vm_page_lookup(obj, pi)) == NULL) {
					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
					if (m == NULL) {
						VM_WAIT;
						vm_pageout_deficit += desiredpages - bp->b_npages;
					} else {
						vm_page_wire(m);
						vm_page_wakeup(m);
						bp->b_flags &= ~B_CACHE;
						bp->b_pages[bp->b_npages] = m;
						++bp->b_npages;
					}
					continue;
				}

				/*
				 * We found a page.  If we have to sleep on it,
				 * retry because it might have gotten freed out
				 * from under us.
				 *
				 * We can only test PG_BUSY here.  Blocking on
				 * m->busy might lead to a deadlock:
				 *
				 *	vm_fault->getpages->cluster_read->allocbuf
				 *
				 */

				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
					continue;

				/*
				 * We have a good page.  Should we wakeup the
				 * page daemon?
				 */
				if ((curproc != pageproc) &&
				    ((m->queue - m->pc) == PQ_CACHE) &&
				    ((cnt.v_free_count + cnt.v_cache_count) <
					(cnt.v_free_min + cnt.v_cache_min))) {
					pagedaemon_wakeup();
				}
				vm_page_flag_clear(m, PG_ZERO);
				vm_page_wire(m);
				bp->b_pages[bp->b_npages] = m;
				++bp->b_npages;
			}

			/*
			 * Step 2.  We've loaded the pages into the buffer,
			 * we have to figure out if we can still have B_CACHE
			 * set.  Note that B_CACHE is set according to the
			 * byte-granular range ( bcount and size ), not the
			 * aligned range ( newbsize ).
			 *
			 * The VM test is against m->valid, which is DEV_BSIZE
			 * aligned.  Needless to say, the validity of the data
			 * needs to also be DEV_BSIZE aligned.  Note that this
			 * fails with NFS if the server or some other client
			 * extends the file's EOF.  If our buffer is resized,
			 * B_CACHE may remain set! XXX
			 */

			toff = bp->b_bcount;
			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);

			while ((bp->b_flags & B_CACHE) && toff < size) {
				vm_pindex_t pi;

				if (tinc > (size - toff))
					tinc = size - toff;

				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
				    PAGE_SHIFT;

				vfs_buf_test_cache(
				    bp,
				    bp->b_offset,
				    toff,
				    tinc,
				    bp->b_pages[pi]
				);
				toff += tinc;
				tinc = PAGE_SIZE;
			}

			/*
			 * Step 3, fixup the KVM pmap.  Remember that
			 * bp->b_data is relative to bp->b_offset, but
			 * bp->b_offset may be offset into the first page.
			 */

			bp->b_data = (caddr_t)
			    trunc_page((vm_offset_t)bp->b_data);
			pmap_qenter(
			    (vm_offset_t)bp->b_data,
			    bp->b_pages,
			    bp->b_npages
			);
			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
		}
	}
	runningbufspace += (newbsize - bp->b_bufsize);
	if (newbsize < bp->b_bufsize)
		bufspacewakeup();
	bp->b_bufsize = newbsize;	/* actual buffer allocation */
	bp->b_bcount = size;		/* requested buffer size */
	return 1;
}

/*
 * bufwait:
 *
 *	Wait for buffer I/O completion, returning error status.  The buffer
 *	is left locked and B_DONE on return.  B_EINTR is converted into an
 *	EINTR error and cleared.
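 *
 *	A minimal illustrative sketch (not from the original source) of a
 *	synchronous write using this routine:
 *
 *		bp->b_iocmd = BIO_WRITE;
 *		vfs_busy_pages(bp, 1);
 *		BUF_STRATEGY(bp);
 *		error = bufwait(bp);	(sleeps until biodone() -> wakeup(bp))
 *		brelse(bp);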
 */
int
bufwait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0) {
		if (bp->b_iocmd == BIO_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, PRIBIO, "biowr", 0);
	}
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_ioflags & BIO_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Call back function from struct bio back up to struct buf.
 * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
 */
void
bufdonebio(struct bio *bp)
{
	bufdone(bp->bio_caller2);
}

/*
 * bufdone:
 *
 *	Finish I/O on a buffer, optionally calling a completion function.
 *	This is usually called from an interrupt so process blocking is
 *	not allowed.
 *
 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
 *	assuming B_INVAL is clear.
 *
 *	For the VMIO case, we set B_CACHE if the op was a read and no
 *	read error occurred, or if the op was a write.  B_CACHE is never
 *	set if the buffer is invalid or otherwise uncacheable.
 *
 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
 *	initiator to leave B_INVAL set to brelse the buffer out of existence
 *	in the biodone routine.
 */
void
bufdone(struct buf *bp)
{
	int s, error;
	void (*biodone) __P((struct buf *));

	s = splbio();

	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));

	bp->b_flags |= B_DONE;

	if (bp->b_iocmd == BIO_DELETE) {
		brelse(bp);
		splx(s);
		return;
	}

	if (bp->b_iocmd == BIO_WRITE) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_iodone != NULL) {
		biodone = bp->b_iodone;
		bp->b_iodone = NULL;
		(*biodone) (bp);
		splx(s);
		return;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);

	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		error = VOP_GETVOBJECT(vp, &obj);

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (error) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("biodone: no buffer offset"));

		if (error) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif

		/*
		 * Set B_CACHE if the op was a normal read and no error
		 * occurred.  B_CACHE is set for writes in the b*write()
		 * routines.
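		 *
		 * (Illustrative note, not in the original: the loop below
		 * walks the buffer a page at a time; resid is the portion of
		 * the I/O falling within the current page, e.g. 4096 on each
		 * iteration for an 8K buffer made up of two 4K pages.)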
		 */
		iosize = bp->b_bcount - bp->b_resid;
		if (bp->b_iocmd == BIO_READ &&
		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
		    !(bp->b_ioflags & BIO_ERROR)) {
			bp->b_flags |= B_CACHE;
		}

		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					vm_object_pip_subtract(obj, 1);
					bp->b_flags &= ~B_CACHE;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf(
"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
				    (unsigned long)foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;

			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly ( see bdwrite() ), so we
			 * only need to do this here in the read case.
			 */
			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (!vn_isdisk(vp, NULL))
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			vm_page_io_finish(m);
			vm_object_pip_subtract(obj, 1);
			foff += resid;
			iosize -= resid;
		}
		if (obj)
			vm_object_pip_wakeupn(obj, 0);
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * will do a wakeup there if necessary - so no need to do a wakeup
	 * here in the async case. The sync case always needs to do a wakeup.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
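 *
 * (Illustrative note, not in the original: this undoes the work of
 * vfs_busy_pages(), including putting back any real pages that were
 * replaced by bogus_page, so a caller that busied the pages but then
 * decided not to issue the I/O can call vfs_unbusy_pages() and then
 * brelse() the buffer.)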
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj;

		VOP_GETVOBJECT(vp, &obj);

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
			}
			vm_object_pip_subtract(obj, 1);
			vm_page_flag_clear(m, PG_ZERO);
			vm_page_io_finish(m);
		}
		vm_object_pip_wakeupn(obj, 0);
	}
}

/*
 * vfs_page_set_valid:
 *
 *	Set the valid bits in a page based on the supplied offset.  The
 *	range is restricted to the buffer's size.
 *
 *	This routine is typically called after a read completes.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	vm_ooffset_t soff, eoff;

	/*
	 * Start and end offsets in buffer.  eoff - soff may not cross a
	 * page boundary or cross the end of the buffer.  The end of the
	 * buffer, in this case, is our file EOF, not the allocation size
	 * of the buffer.
	 */
	soff = off;
	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
	if (eoff > bp->b_offset + bp->b_bcount)
		eoff = bp->b_offset + bp->b_bcount;

	/*
	 * Set valid range.  This is typically the entire buffer and thus the
	 * entire page.
	 */
	if (eoff > soff) {
		vm_page_set_validclean(
		    m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff)
		);
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 *
 * Since I/O has not been initiated yet, certain buffer flags
 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 * and should be ignored.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i, bogus;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj;
		vm_ooffset_t foff;

		VOP_GETVOBJECT(vp, &obj);
		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_busy_pages: no buffer offset"));
		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
				goto retry;
		}

		bogus = 0;
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			/*
			 * When readying a buffer for a read ( i.e
			 * clear_modify == 0 ), it is important to do
			 * bogus_page replacement for valid pages in
			 * partially instantiated buffers.  Partially
			 * instantiated buffers can, in turn, occur when
			 * reconstituting a buffer from its VM backing store
			 * base.  We only have to do this if B_CACHE is
			 * clear ( which causes the I/O to occur in the
			 * first place ).  The replacement prevents the read
			 * I/O from overwriting potentially dirty VM-backed
			 * pages.  XXX bogus page replacement is, uh, bogus.
			 * It may not work properly with small-block devices.
			 * We need to find a better way.
			 */

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (m->valid == VM_PAGE_BITS_ALL &&
			    (bp->b_flags & B_CACHE) == 0) {
				bp->b_pages[i] = bogus_page;
				bogus++;
			}
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}
		if (bogus)
			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 *
 * Note that while we only really need to clean through to b_bcount, we
 * just go ahead and clean through to b_bufsize.
 */
static void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_clean_pages: no buffer offset"));
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
			vm_ooffset_t eoff = noff;

			if (eoff > bp->b_offset + bp->b_bufsize)
				eoff = bp->b_offset + bp->b_bufsize;
			vfs_page_set_valid(bp, foff, i, m);
			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
			foff = noff;
		}
	}
}

/*
 * vfs_bio_set_validclean:
 *
 *	Set the range within the buffer to valid and clean.  The range is
 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
 *	itself may be offset from the beginning of the first page.
 */

void
vfs_bio_set_validclean(struct buf *bp, int base, int size)
{
	if (bp->b_flags & B_VMIO) {
		int i;
		int n;

		/*
		 * Fixup base to be relative to beginning of first page.
		 * Set initial n to be the maximum number of bytes in the
		 * first page that can be validated.
		 */

		base += (bp->b_offset & PAGE_MASK);
		n = PAGE_SIZE - (base & PAGE_MASK);

		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
			vm_page_t m = bp->b_pages[i];

			if (n > size)
				n = size;

			vm_page_set_validclean(m, base & PAGE_MASK, n);
			base += n;
			size -= n;
			n = PAGE_SIZE;
		}
	}
}

/*
 * vfs_bio_clrbuf:
 *
 *	clear a buffer.  This routine essentially fakes an I/O, so we need
 *	to clear BIO_ERROR and B_INVAL.
 *
 *	Note that while we only theoretically need to clear through b_bcount,
 *	we go ahead and clear through b_bufsize.
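 *
 *	(Illustrative example, not in the original comment: with
 *	DEV_BSIZE == 512, a 2K single-page buffer takes the fast path
 *	below with mask = (1 << (2048 / 512)) - 1 = 0xf, marking the
 *	first four DEV_BSIZE chunks of the page valid.)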
 */

void
vfs_bio_clrbuf(struct buf *bp) {
	int i, mask = 0;
	caddr_t sa, ea;
	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
		    (bp->b_offset & PAGE_MASK) == 0) {
			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    ((bp->b_pages[0]->valid & mask) != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid |= mask;
			bp->b_resid = 0;
			return;
		}
		ea = sa = bp->b_data;
		for(i=0;i<bp->b_npages;i++,sa=ea) {
			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
			ea = (caddr_t)(vm_offset_t)ulmin(
			    (u_long)(vm_offset_t)ea,
			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
			if ((bp->b_pages[i]->valid & mask) == mask)
				continue;
			if ((bp->b_pages[i]->valid & mask) == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(sa, ea - sa);
				}
			} else {
				for (; sa < ea; sa += DEV_BSIZE, j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(sa, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid |= mask;
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of a
 * buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

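	/*
	 * Illustrative example session (hypothetical address and output,
	 * not from the original source):
	 *
	 *	db> show buffer 0xc2f40000
	 *	b_flags = 0x20000020<vmio,cache>
	 *	b_error = 0, b_bufsize = 8192, b_bcount = 8192, b_resid = 0
	 *	...
	 */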
	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    major(bp->b_dev), minor(bp->b_dev),
	    bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */