vfs_bio.c revision 26471
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.118 1997/06/03 09:42:43 dfr Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 16

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated assuming that all filesystem blocks
 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for an 8K filesystem.  This
 * keeps the size of the buffer cache "in check" for big-block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

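/*
 * Editorial note (not part of the original source): as a worked example of
 * the sizing above, assume a hypothetical machine where nbuf is 1024 and
 * DFLTBSIZE is 8192 bytes.  Then:
 *
 *	maxbufspace       = (1024 + 8) * 8192 = 8454144 bytes (~8.1MB)
 *	maxvmiobufspace   = 2 * 8454144 / 3   = 5636096 bytes (~5.4MB)
 *	maxbufmallocspace = 8454144 / 20      =  422707 bytes (~413KB)
 *
 * The actual values depend on how nbuf is auto-sized at boot.
 */
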
/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

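/*
 * Editorial sketch (not part of the original source): a typical bread()
 * caller in a filesystem read path.  The vnode "vp", logical block "lbn"
 * and block size "bsize" are assumed to come from the caller; on error the
 * buffer must still be released.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...consume bp->b_data...
 *	brelse(bp);			(or bqrelse() to keep it cached)
 */
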
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise
	 * if biodone was not modified, then the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC) {
			if ((bp->b_flags & B_DONE)) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if( bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

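/*
 * Editorial note (not part of the original source): a quick summary of the
 * four write entry points above, from a caller's point of view.
 *
 *	bwrite(bp)  - synchronous; sleeps in biowait() and returns the error.
 *	bdwrite(bp) - delayed; marks the buffer B_DELWRI and requeues it, so
 *	              the data goes to disk on a later sync or clustered write.
 *	bawrite(bp) - asynchronous; starts the write and returns immediately,
 *	              biodone() releases the buffer.
 *	bowrite(bp) - ordered; like bwrite() unless the driver converts the
 *	              request to async in its strategy routine.
 */
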
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, without invalidating its contents.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();


	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

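/*
 * Editorial note (not part of the original source): vfs_vmio_release()
 * below tears down the VM side of a VMIO buffer -- it unwires the backing
 * pages, lets clean unheld pages move to the cache/free queues (or merely
 * clears act_count for async frees), unmaps the pages from the buffer's
 * kva and detaches the buffer from its vnode.
 */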
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}

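/*
 * Editorial note (not part of the original source): vfs_bio_awrite() is
 * handed a B_DELWRI buffer that is still on its free list; it scans the
 * hash for adjacent delayed-write buffers of the same size and, when it
 * finds a run, hands the whole run to cluster_wbuild() as one I/O.  The
 * return value is the number of bytes pushed, which getnewbuf() uses to
 * credit its space accounting.
 */
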
/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we are below our allocated minimum
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

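/*
 * Editorial note (not part of the original source): getnewbuf() returns
 * NULL whenever it had to sleep or to push a delayed write, because the
 * buffer queues may have changed while it was blocked.  Callers such as
 * getblk() must therefore be prepared to loop and retry the lookup.
 */
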
/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

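/*
 * Editorial sketch (not part of the original source): the usual metadata
 * access pattern built from getblk().  Unlike bread(), getblk() never does
 * I/O itself; the caller must check B_CACHE and read or fill the buffer.
 * "vp", "lbn" and "bsize" are assumed to come from the caller.
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		...fill bp->b_data or start a read...
 *	}
 *	...modify bp->b_data...
 *	bdwrite(bp);			(or bwrite(bp) for a synchronous update)
 */
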
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

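/*
 * Editorial sketch (not part of the original source): biowait() pairs with
 * biodone() below to implement synchronous I/O on top of the async strategy
 * interface.  A caller that builds its own request typically does:
 *
 *	bp->b_flags |= B_READ;		(or clears it for a write)
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);		(driver calls biodone() at interrupt time)
 *	error = biowait(bp);		(sleeps until B_DONE, returns b_error)
 */
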
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

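/*
 * Editorial note (not part of the original source): count_lock_queue()
 * below simply walks the QUEUE_LOCKED free list and returns the number
 * of buffers on it.
 */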
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

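/*
 * Editorial note (not part of the original source): vfs_update() below is
 * the body of the "update" kernel process started by the SYSINIT_KT entry
 * near the top of this file.  It syncs the filesystems every
 * vfs_update_interval seconds (or sooner, if someone wakes
 * vfs_update_wakeup); the interval is exported as the kern.update sysctl,
 * whose handler wakes the daemon so a new value takes effect immediately.
 */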
int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
				     (vm_offset_t) ((foff + off) & PAGE_MASK),
				     size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
			    (vm_offset_t) (soff & PAGE_MASK),
			    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
				       (vm_offset_t) (soff & PAGE_MASK),
				       (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

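/*
 * Editorial note (not part of the original source): vfs_bio_clrbuf() below
 * zeroes a VMIO buffer's contents, but consults the per-page valid bits so
 * that DEV_BSIZE chunks which are already valid in the VM system are left
 * alone; non-VMIO buffers fall back to plain clrbuf().
 */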
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
}
#endif /* DDB */