vfs_bio.c revision 26290
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.116 1997/05/19 14:36:36 dfr Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

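/*
 * The vfs update daemon is created at boot as a kernel thread by
 * kproc_start, via the SYSINIT_KT entry below.
 */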
static void vfs_update __P((void));
static struct	proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

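/*
 * BUF_MAXUSE caps a buffer's b_usecount; getblk bumps the count on each
 * cache hit and getnewbuf decays it when hunting for reusable LRU
 * buffers, so this cap bounds how long a hot buffer resists recycling.
 */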
#define BUF_MAXUSE 16

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
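
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine or modify bp->b_data...
 *	brelse(bp);	(or bdwrite(bp)/bawrite(bp) if dirtied)
 */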

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
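	/*
	 * Start asynchronous reads for each requested read-ahead block
	 * that is neither resident in memory nor already cached.
	 */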
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise
	 * if biodone was not modified, then the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC) {
			if ((bp->b_flags & B_DONE)) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 */
	if ((bp->b_flags & B_VMIO)
	    && (bp->b_vp->v_tag != VT_NFS
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/*
		 * Get rid of the kva allocation *now*
		 */
		bfreekva(bp);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

			/*
			 * In the case of sync buffer frees, we can do pretty much
			 * anything to any of the memory queues.  Specifically,
			 * the cache queue is okay to be modified.
			 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

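		/*
		 * Scan forward for logically adjacent delayed-write
		 * buffers of the same block size that are also physically
		 * contiguous on disk; these can be pushed out together
		 * as a single cluster.
		 */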
		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	int nbyteswritten = 0;
	vm_offset_t addr;

start:
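	/*
	 * If the buffer cache is already at its space limit, skip the
	 * empty queue and recycle an existing buffer instead.
	 */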
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
			(vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have exceeded our allocated buffer space
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

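	/*
	 * Every tinc-sized chunk of the block must be resident and
	 * valid in the VM object for the block to count as in memory.
	 */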
	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
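		/*
		 * Here boffset is a page count, not a byte offset; it is
		 * clamped against the object size before being shifted
		 * into bytes for b_dirtyend.
		 */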
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
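			/*
			 * tsleep returns 0 on a normal wakeup, in which
			 * case we retry the lookup; on a timeout or a
			 * caught signal we give up and return NULL.
			 */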
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't happen
		 * but do when filesystems don't handle the size changes correctly.)
		 * We are conservative on metadata and don't just extend the buffer
		 * but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
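				/*
				 * Walk the buffer's file range chunk by
				 * chunk, wiring pages already resident in
				 * the VM object and allocating any that are
				 * missing; if we must wait for a page, the
				 * whole pass restarts from doretry.
				 */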
		doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
								(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS && bp->b_validend == 0)
					bp->b_flags &= ~B_CACHE;
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
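		/*
		 * Walk each page of the buffer: replace any bogus_page
		 * placeholders installed by vfs_busy_pages with the real
		 * pages, update the valid bits on reads, and drop the
		 * per-page busy counts taken for the I/O.
		 */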
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
						(int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
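
/*
 * Illustrative usage (a sketch, not part of this file): the sync
 * interval can be tuned from userland with e.g. "sysctl -w
 * kern.update=15"; setting it also wakes the update daemon at once,
 * since the handler above calls wakeup() on vfs_update_wakeup.
 */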

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}
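
/*
 * Pairing sketch (illustrative; identifiers like dev_failed are
 * hypothetical): a caller that marked pages busy via vfs_busy_pages()
 * but will not reach biodone() must undo the accounting itself:
 *
 *	vfs_busy_pages(bp, 0);
 *	if (dev_failed) {
 *		vfs_unbusy_pages(bp);	-- back out busy/paging_in_progress
 *		bp->b_flags |= B_ERROR;
 *		brelse(bp);
 *	} else
 *		VOP_STRATEGY(bp);
 */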

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
				     (vm_offset_t) ((foff + off) & PAGE_MASK),
				     size)) {
		bp->b_flags &= ~B_CACHE;
	}
}
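
/*
 * Worked example for the scan above (illustrative numbers): with
 * DEV_BSIZE == 512 and m->valid == 0x3c (DEV_BSIZE blocks 2-5 of the
 * page valid), the first loop skips two clear bits, giving
 * svalid == off + 2 * 512, and the second loop consumes four set bits,
 * giving evalid == off + 6 * 512.  A second valid range above a hole
 * (say m->valid == 0xf0f) is deliberately ignored.
 */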

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
			    (vm_offset_t) (soff & PAGE_MASK),
			    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS) {
		vm_ooffset_t sv, ev;

		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
				       (vm_offset_t) (soff & PAGE_MASK),
				       (vm_offset_t) (eoff - soff));
}
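
/*
 * Worked example (illustrative values; assumes the buffer maps file
 * offset 0): for the second page of an NFS buffer (pageno == 1,
 * PAGE_SIZE == 4096, DEV_BSIZE == 512) with b_validoff == 100 and
 * b_validend == 6000, off is first rewound to the buffer start, then
 * sv == 512 (b_validoff rounded up) and ev == 5632 (b_validend rounded
 * down).  After clipping, only file offsets [4096, 5632) - the first
 * 1536 bytes of this page - are marked valid and clean.
 */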

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and to treat the pages associated with the buffer
 * almost as if they were PG_BUSY.  The object's paging_in_progress
 * count is also maintained so that the object does not become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}
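
/*
 * Typical pairing, sketched (not literal code from this file; locking
 * and error handling omitted): mark the pages busy, hand the buffer to
 * the driver, and let biodone() drop the busy/paging_in_progress
 * counts at completion:
 *
 *	vfs_busy_pages(bp, 1);		-- a write: pages become valid/clean
 *	VOP_STRATEGY(bp);
 *	...				-- biodone() runs when the I/O ends
 */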

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}
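
/*
 * Sketch of the intended caller (hypothetical, condensed from a
 * delayed-write path): the pages can be marked clean up front because
 * the buffer itself remembers the pending write:
 *
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp, bp->b_vp);
 *	vfs_clean_pages(bp);
 *	bqrelse(bp);
 */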

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
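
/*
 * Example of the small-buffer mask above (illustrative sizes): a
 * 1536 byte buffer with DEV_BSIZE == 512 covers three disk blocks, so
 * the loop builds mask == 0x07; when the page's valid bits already
 * equal that mask, the bzero() of the whole buffer is skipped.
 */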

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
static void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

static void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}
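
/*
 * Sketch of the expected callers (illustrative; newbsize is
 * hypothetical): allocbuf() grows and shrinks non-VMIO buffers with
 * these routines, passing kernel virtual addresses based on b_data:
 *
 *	-- grow the buffer from b_bufsize up to newbsize
 *	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t) bp->b_data + newbsize);
 *
 *	-- shrink: release the tail pages
 *	vm_hold_free_pages(bp, (vm_offset_t) bp->b_data + newbsize,
 *	    (vm_offset_t) bp->b_data + bp->b_bufsize);
 */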

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
}
#endif /* DDB */