/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");

static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;
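
/*
 * A note on how bogus_page is used below: while a cluster is built, any
 * page that is already fully valid is replaced by bogus_page in the
 * cluster buffer's page array, so the device transfer for that file
 * offset lands in the throwaway page instead of overwriting newer data.
 * The real pages are put back at I/O completion (see vfs_unbusy_pages()).
 */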

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
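	/*
	 * Worked example (illustrative numbers only): with a 128kB
	 * mnt_iosize_max and 16kB blocks, racluster is 8, so a single
	 * cluster I/O covers at most 8 blocks.  The read-ahead window
	 * maxra is the sequentiality estimate (seqcount) clamped by the
	 * vfs.read_max sysctl, by nbuf/8, and finally by end of file.
	 */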
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, gbflags);
	origblkno = lblkno;
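
	/*
	 * B_RAM is the "read-ahead mark": cluster_rbuild() sets it on a
	 * buffer within each cluster it issues.  Finding a cached buffer
	 * that still carries B_RAM means the process has consumed the
	 * read-ahead issued earlier, so more should be queued now.
	 */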

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			bp->b_flags &= ~B_RAM;
			BO_RLOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again. (If we can lock the
				 * buffer without waiting)
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
				    && (0 == BUF_LOCK(rbp,
				    LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_RUNLOCK(bo);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Adjust totread if needed
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		curthread->td_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
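	/*
	 * Each pass below maps the next logical block with VOP_BMAP() and
	 * issues an asynchronous cluster (or a single async block when the
	 * data is not physically contiguous) until the window
	 * [origblkno, origblkno + maxra) has been covered.
	 */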
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
		curthread->td_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
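
/*
 * Typical usage, sketched from a filesystem read path (the argument
 * names below are illustrative, not taken from any particular caller):
 *
 *	error = cluster_read(vp, filesize, lbn, blksize, NOCRED,
 *	    uio->uio_resid, seqcount, 0, &bp);
 *
 * i.e. callers pass the residual transfer size and their sequentiality
 * estimate, and receive the synchronously read buffer through *bpp.
 */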

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, k, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_flags |= B_UNMAPPED;
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
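	/*
	 * Gather the component buffers: the first pass shared-busies the
	 * pages of the original buffer, while each later pass acquires the
	 * next logical block with getblk() and accepts it only if it is
	 * completely invalid and its pages can be shared-busied; the first
	 * buffer that fails these tests terminates the cluster.
	 */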
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i == 0) {
			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
			vfs_drain_busy_pages(tbp);
			vm_object_pip_add(tbp->b_bufobj->bo_object,
			    tbp->b_npages);
			for (k = 0; k < tbp->b_npages; k++)
				vm_page_sbusy(tbp->b_pages[k]);
			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
		} else {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.  The bo lock is not
			 * required for the BKGRDINPROG check since it
			 * can not be set without the buf lock.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0)
					break;
				if (vm_page_xbusied(tbp->b_pages[j]))
					break;
				vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
				vm_page_sbusy(tbp->b_pages[j]);
				off += tinc;
				tsize -= tinc;
			}
			if (tsize > 0) {
clean_sbusy:
				vm_object_pip_add(tbp->b_bufobj->bo_object, -j);
				for (k = 0; k < j; k++)
					vm_page_sunbusy(tbp->b_pages[k]);
				VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
				bqrelse(tbp);
				break;
			}
			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
				goto clean_sbusy;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if (m->valid == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;
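
	/*
	 * Unless the caller asked for an unmapped transfer, wire the
	 * gathered pages into the pbuf's KVA so the whole cluster is
	 * addressable through b_data as one contiguous range.
	 */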
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		    bp->b_npages);
	}
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment). If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	pbrelvp(bp);
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

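	/*
	 * In the backed-off mode the window just completed is left dirty
	 * and the one before it is pushed instead: e.g. with start_lbn 32
	 * and len 16 (illustrative numbers), mode 2 writes lbns 16..31
	 * while mode 1 would write 32..47.
	 */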
	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
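
	/*
	 * Cluster bookkeeping kept in the vnode: v_cstart is the first
	 * lbn of the cluster being accumulated, v_lastw the last lbn
	 * written, v_lasta the last physical block assigned (used to
	 * detect physical discontinuity), and v_clen the number of
	 * blocks, beyond v_cstart, that can still join the cluster.
	 */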
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, gbflags);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1, gbflags);
		}
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
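
/*
 * Illustrative write pattern (example numbers, not code): for sequential
 * 16kB writes with a 128kB mnt_iosize_max and the default write_behind,
 * v_clen is 7, so blocks are bdwrite()-delayed until the eighth one
 * arrives, at which point cluster_wbuild_wb() pushes all eight as a
 * single 128kB transfer and a new cluster begins at the next lbn.
 */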

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
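	/*
	 * Scan the range one block at a time.  Blocks that are absent,
	 * being written in the background, locked, pinned, or clean are
	 * skipped; a dirty block that cannot anchor a cluster (wrong size,
	 * not VMIO, no pbuf available) is pushed alone with bawrite();
	 * otherwise a pbuf is synthesized and as many adjacent dirty
	 * blocks as possible are folded into a single write.
	 */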
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		if (tbp->b_pin_count > 0) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = (vp->v_vflag & VV_MD) != 0 ?
		    trypbuf(&cluster_pbuf_freecnt) :
		    getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in.
		 * so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_flags |= B_UNMAPPED;
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_LOCKPTR(bo)))
					break;

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
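
				/*
				 * The physical-contiguity test above demands
				 * that component i sit exactly i blocks past
				 * the cluster start on disk: e.g. with 16kB
				 * blocks (dbsize = 32 DEV_BSIZE sectors), the
				 * third component must start 64 sectors after
				 * bp->b_blkno (illustrative numbers).
				 */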

				/*
				 * Do not pull in pinned buffers.
				 */
				if (tbp->b_pin_count > 0) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
				if (i == 0) {
					vfs_drain_busy_pages(tbp);
				} else { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (vm_page_xbusied(m)) {
							VM_OBJECT_WUNLOCK(
							    tbp->b_bufobj->bo_object);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_sbusy(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
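			/*
			 * Each accepted component is now converted in place
			 * into an asynchronous write: it is marked clean
			 * (bundirty()), moved to the vnode's clean list, and
			 * chained onto the cluster; completion is delivered
			 * per component by cluster_callback().
			 */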
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);		/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		if ((bp->b_flags & B_UNMAPPED) == 0) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void)bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}