vfs_bio.c revision 33134
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 *		John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.148 1998/02/04 22:32:39 eivind Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme.  Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *
23 * Author:  John S. Dyson
24 * Significant help during the development and debugging phases
25 * has been provided by David Greenman, also of the FreeBSD core team.
26 */
27
28#include "opt_bounce.h"
29
30#define VMIO
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/sysproto.h>
34#include <sys/kernel.h>
35#include <sys/sysctl.h>
36#include <sys/proc.h>
37#include <sys/vnode.h>
38#include <sys/vmmeter.h>
39#include <sys/lock.h>
40#include <vm/vm.h>
41#include <vm/vm_param.h>
42#include <vm/vm_prot.h>
43#include <vm/vm_kern.h>
44#include <vm/vm_pageout.h>
45#include <vm/vm_page.h>
46#include <vm/vm_object.h>
47#include <vm/vm_extern.h>
48#include <vm/vm_map.h>
49#include <sys/buf.h>
50#include <sys/mount.h>
51#include <sys/malloc.h>
52#include <sys/resourcevar.h>
53
54static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
55
56static void vfs_update __P((void));
57static struct	proc *updateproc;
58static struct kproc_desc up_kp = {
59	"update",
60	vfs_update,
61	&updateproc
62};
63SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
64
65struct buf *buf;		/* buffer header pool */
66struct swqueue bswlist;
67
68int count_lock_queue __P((void));
69static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
70		vm_offset_t to);
71static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
72		vm_offset_t to);
73static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
74			      vm_offset_t off, vm_offset_t size,
75			      vm_page_t m);
76static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
77			       int pageno, vm_page_t m);
78static void vfs_clean_pages(struct buf * bp);
79static void vfs_setdirty(struct buf *bp);
80static void vfs_vmio_release(struct buf *bp);
81static void flushdirtybuffers(int slpflag, int slptimeo);
82
83int needsbuffer;
84
85/*
86 * Internal update daemon, process 3
87 *	The variable vfs_update_wakeup allows for internal syncs.
88 */
89int vfs_update_wakeup;
90
91
92/*
93 * buffers base kva
94 */
95
96/*
97 * bogus page -- for I/O to/from partially complete buffers
98 * this is a temporary solution to the problem, but it is not
99 * really that bad.  it would be better to split the buffer
100 * for input in the case of buffers partially already in memory,
101 * but the code is intricate enough already.
102 */
103vm_page_t bogus_page;
104static vm_offset_t bogus_offset;
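/*
 * brelse(), biodone() and vfs_unbusy_pages() recognize bogus_page in
 * b_pages[] and look the real page back up with vm_page_lookup() once
 * the transfer completes.
 */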
105
106static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
107	bufmallocspace, maxbufmallocspace;
108int numdirtybuffers, lodirtybuffers, hidirtybuffers;
109static int numfreebuffers, lofreebuffers, hifreebuffers;
110static int kvafreespace;
111
112SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
113	&numdirtybuffers, 0, "");
114SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
115	&lodirtybuffers, 0, "");
116SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
117	&hidirtybuffers, 0, "");
118SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
119	&numfreebuffers, 0, "");
120SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
121	&lofreebuffers, 0, "");
122SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
123	&hifreebuffers, 0, "");
124SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
125	&maxbufspace, 0, "");
126SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
127	&bufspace, 0, "");
128SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
129	&maxvmiobufspace, 0, "");
130SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
131	&vmiospace, 0, "");
132SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
133	&maxbufmallocspace, 0, "");
134SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
135	&bufmallocspace, 0, "");
136SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
137	&kvafreespace, 0, "");
138
139static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
140static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
141
142extern int vm_swap_size;
143
144#define BUF_MAXUSE 24
145
146#define VFS_BIO_NEED_ANY 1
147#define VFS_BIO_NEED_LOWLIMIT 2
148#define VFS_BIO_NEED_FREE 4
149
150/*
151 * Initialize buffer headers and related structures.
152 */
153void
154bufinit()
155{
156	struct buf *bp;
157	int i;
158
159	TAILQ_INIT(&bswlist);
160	LIST_INIT(&invalhash);
161
162	/* first, make a null hash table */
163	for (i = 0; i < BUFHSZ; i++)
164		LIST_INIT(&bufhashtbl[i]);
165
166	/* next, make a null set of free lists */
167	for (i = 0; i < BUFFER_QUEUES; i++)
168		TAILQ_INIT(&bufqueues[i]);
169
170	/* finally, initialize each buffer header and stick on empty q */
171	for (i = 0; i < nbuf; i++) {
172		bp = &buf[i];
173		bzero(bp, sizeof *bp);
174		bp->b_flags = B_INVAL;	/* we're just an empty header */
175		bp->b_dev = NODEV;
176		bp->b_rcred = NOCRED;
177		bp->b_wcred = NOCRED;
178		bp->b_qindex = QUEUE_EMPTY;
179		bp->b_vnbufs.le_next = NOLIST;
180		bp->b_generation = 0;
181		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
182		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
183	}
184/*
185 * maxbufspace is currently calculated assuming that all filesystem blocks
186 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
187 * cache is still the same as it would be for 8K filesystems.  This
188 * keeps the size of the buffer cache "in check" for big block filesystems.
189 */
190	maxbufspace = (nbuf + 8) * DFLTBSIZE;
191/*
192 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
193 */
194	maxvmiobufspace = 2 * maxbufspace / 3;
195/*
196 * Limit the amount of malloc memory since it is wired permanently into
197 * the kernel space.  Even though this is accounted for in the buffer
198 * allocation, we don't want the malloced region to grow uncontrolled.
199 * The malloc scheme improves memory utilization significantly on average
200 * (small) directories.
201 */
202	maxbufmallocspace = maxbufspace / 20;
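/*
 * Illustrative sizing only: with nbuf == 1000, for example, the cache is
 * capped at 1008 * DFLTBSIZE bytes, roughly two thirds of which may be
 * VMIO space and one twentieth of which may be malloced metadata.
 */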
203
204/*
205 * Remove the probability of deadlock conditions by limiting the
206 * number of dirty buffers.
207 */
208	hidirtybuffers = nbuf / 8 + 20;
209	lodirtybuffers = nbuf / 16 + 10;
210	numdirtybuffers = 0;
211	lofreebuffers = nbuf / 18 + 5;
212	hifreebuffers = 2 * lofreebuffers;
213	numfreebuffers = nbuf;
214	kvafreespace = 0;
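/*
 * For example (illustrative only): with nbuf == 1000 the integer
 * arithmetic above yields hidirtybuffers == 145, lodirtybuffers == 72,
 * lofreebuffers == 60 and hifreebuffers == 120.
 */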
215
216	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
217	bogus_page = vm_page_alloc(kernel_object,
218			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
219			VM_ALLOC_NORMAL);
220
221}
222
223/*
224 * Free the kva allocation for a buffer
225 * Must be called only at splbio or higher,
226 *  as this is the only locking for buffer_map.
227 */
228static void
229bfreekva(struct buf * bp)
230{
231	if (bp->b_kvasize == 0)
232		return;
233
234	vm_map_delete(buffer_map,
235		(vm_offset_t) bp->b_kvabase,
236		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);
237
238	bp->b_kvasize = 0;
239
240}
241
242/*
243 * remove the buffer from the appropriate free list
244 */
245void
246bremfree(struct buf * bp)
247{
248	int s = splbio();
249
250	if (bp->b_qindex != QUEUE_NONE) {
251		if (bp->b_qindex == QUEUE_EMPTY) {
252			kvafreespace -= bp->b_kvasize;
253		}
254		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
255		bp->b_qindex = QUEUE_NONE;
256	} else {
257#if !defined(MAX_PERF)
258		panic("bremfree: removing a buffer when not on a queue");
259#endif
260	}
261	if ((bp->b_flags & B_INVAL) ||
262		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
263		--numfreebuffers;
264	splx(s);
265}
266
267
268/*
269 * Get a buffer with the specified data.  Look in the cache first.
270 */
271int
272bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
273    struct buf ** bpp)
274{
275	struct buf *bp;
276
277	bp = getblk(vp, blkno, size, 0, 0);
278	*bpp = bp;
279
280	/* if not found in cache, do some I/O */
281	if ((bp->b_flags & B_CACHE) == 0) {
282		if (curproc != NULL)
283			curproc->p_stats->p_ru.ru_inblock++;
284		bp->b_flags |= B_READ;
285		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
286		if (bp->b_rcred == NOCRED) {
287			if (cred != NOCRED)
288				crhold(cred);
289			bp->b_rcred = cred;
290		}
291		vfs_busy_pages(bp, 0);
292		VOP_STRATEGY(bp);
293		return (biowait(bp));
294	}
295	return (0);
296}
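/*
 * Typical use (an illustrative sketch, not code from this file):
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *
 * A buffer is returned through *bpp even on error, so the caller must
 * release it (brelse, bqrelse or a write) in either case.
 */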
297
298/*
299 * Operates like bread, but also starts asynchronous I/O on
300 * read-ahead blocks.
301 */
302int
303breadn(struct vnode * vp, daddr_t blkno, int size,
304    daddr_t * rablkno, int *rabsize,
305    int cnt, struct ucred * cred, struct buf ** bpp)
306{
307	struct buf *bp, *rabp;
308	int i;
309	int rv = 0, readwait = 0;
310
311	*bpp = bp = getblk(vp, blkno, size, 0, 0);
312
313	/* if not found in cache, do some I/O */
314	if ((bp->b_flags & B_CACHE) == 0) {
315		if (curproc != NULL)
316			curproc->p_stats->p_ru.ru_inblock++;
317		bp->b_flags |= B_READ;
318		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
319		if (bp->b_rcred == NOCRED) {
320			if (cred != NOCRED)
321				crhold(cred);
322			bp->b_rcred = cred;
323		}
324		vfs_busy_pages(bp, 0);
325		VOP_STRATEGY(bp);
326		++readwait;
327	}
328	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
329		if (inmem(vp, *rablkno))
330			continue;
331		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
332
333		if ((rabp->b_flags & B_CACHE) == 0) {
334			if (curproc != NULL)
335				curproc->p_stats->p_ru.ru_inblock++;
336			rabp->b_flags |= B_READ | B_ASYNC;
337			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
338			if (rabp->b_rcred == NOCRED) {
339				if (cred != NOCRED)
340					crhold(cred);
341				rabp->b_rcred = cred;
342			}
343			vfs_busy_pages(rabp, 0);
344			VOP_STRATEGY(rabp);
345		} else {
346			brelse(rabp);
347		}
348	}
349
350	if (readwait) {
351		rv = biowait(bp);
352	}
353	return (rv);
354}
355
356/*
357 * Write, release buffer on completion.  (Done by iodone
358 * if async.)
359 */
360int
361bwrite(struct buf * bp)
362{
363	int oldflags = bp->b_flags;
364
365	if (bp->b_flags & B_INVAL) {
366		brelse(bp);
367		return (0);
368	}
369#if !defined(MAX_PERF)
370	if (!(bp->b_flags & B_BUSY))
371		panic("bwrite: buffer is not busy???");
372#endif
373
374	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
375	bp->b_flags |= B_WRITEINPROG;
376
377	if ((oldflags & B_DELWRI) == B_DELWRI) {
378		--numdirtybuffers;
379		reassignbuf(bp, bp->b_vp);
380	}
381
382	bp->b_vp->v_numoutput++;
383	vfs_busy_pages(bp, 1);
384	if (curproc != NULL)
385		curproc->p_stats->p_ru.ru_oublock++;
386	VOP_STRATEGY(bp);
387
388	if ((oldflags & B_ASYNC) == 0) {
389		int rtval = biowait(bp);
390
391		if (oldflags & B_DELWRI) {
392			reassignbuf(bp, bp->b_vp);
393		}
394		brelse(bp);
395		return (rtval);
396	}
397	return (0);
398}
399
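/*
 * Account for a newly available buffer and wake up any thread sleeping
 * on needsbuffer (getnewbuf, waitfreebuffers) whose condition is now
 * satisfied.
 */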
400inline void
401vfs_bio_need_satisfy(void) {
402	++numfreebuffers;
403	if (!needsbuffer)
404		return;
405	if (numdirtybuffers < lodirtybuffers) {
406		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
407	} else {
408		needsbuffer &= ~VFS_BIO_NEED_ANY;
409	}
410	if (numfreebuffers >= hifreebuffers) {
411		needsbuffer &= ~VFS_BIO_NEED_FREE;
412	}
413	wakeup(&needsbuffer);
414}
415
416/*
417 * Delayed write. (Buffer is marked dirty).
418 */
419void
420bdwrite(struct buf * bp)
421{
422
423#if !defined(MAX_PERF)
424	if ((bp->b_flags & B_BUSY) == 0) {
425		panic("bdwrite: buffer is not busy");
426	}
427#endif
428
429	if (bp->b_flags & B_INVAL) {
430		brelse(bp);
431		return;
432	}
433	if (bp->b_flags & B_TAPE) {
434		bawrite(bp);
435		return;
436	}
437	bp->b_flags &= ~(B_READ|B_RELBUF);
438	if ((bp->b_flags & B_DELWRI) == 0) {
439		bp->b_flags |= B_DONE | B_DELWRI;
440		reassignbuf(bp, bp->b_vp);
441		++numdirtybuffers;
442	}
443
444	/*
445	 * This bmap keeps the system from needing to do the bmap later,
446	 * perhaps when the system is attempting to do a sync.  Since it
447	 * is likely that the indirect block -- or whatever other datastructure
448	 * is likely that the indirect block -- or whatever other data
449	 * structure the filesystem needs -- is still in memory now, it is
450	 * a good thing to do this.  Note also that if the pageout daemon
451	 * is requesting a sync, there might not be enough memory to do
452	 * the bmap then, so it is important to do it now.
453	if (bp->b_lblkno == bp->b_blkno) {
454		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
455	}
456
457	/*
458	 * Set the *dirty* buffer range based upon the VM system dirty pages.
459	 */
460	vfs_setdirty(bp);
461
462	/*
463	 * We need to do this here to satisfy the vnode_pager and the
464	 * pageout daemon, so that it thinks that the pages have been
465	 * "cleaned".  Note that since the pages are in a delayed write
466	 * buffer -- the VFS layer "will" see that the pages get written
467	 * out on the next sync, or perhaps the cluster will be completed.
468	 */
469	vfs_clean_pages(bp);
470	bqrelse(bp);
471
472	if (numdirtybuffers >= hidirtybuffers)
473		flushdirtybuffers(0, 0);
474
475	return;
476}
477
478/*
479 * Asynchronous write.
480 * Start output on a buffer, but do not wait for it to complete.
481 * The buffer is released when the output completes.
482 */
483void
484bawrite(struct buf * bp)
485{
486	bp->b_flags |= B_ASYNC;
487	(void) VOP_BWRITE(bp);
488}
489
490/*
491 * Ordered write.
492 * Start output on a buffer, but only wait for it to complete if the
493 * output device cannot guarantee ordering in some other way.  Devices
494 * that can perform asynchronous ordered writes will set the B_ASYNC
495 * flag in their strategy routine.
496 * The buffer is released when the output completes.
497 */
498int
499bowrite(struct buf * bp)
500{
501	/*
502	 * XXX Add in B_ASYNC once the SCSI
503	 *     layer can deal with ordered
504	 *     writes properly.
505	 */
506	bp->b_flags |= B_ORDERED;
507	return (VOP_BWRITE(bp));
508}
509
510/*
511 * Release a buffer.
512 */
513void
514brelse(struct buf * bp)
515{
516	int s;
517
518	if (bp->b_flags & B_CLUSTER) {
519		relpbuf(bp);
520		return;
521	}
522	/* anyone need a "free" block? */
523	s = splbio();
524
525	/* anyone need this block? */
526	if (bp->b_flags & B_WANTED) {
527		bp->b_flags &= ~(B_WANTED | B_AGE);
528		wakeup(bp);
529	}
530
531	if (bp->b_flags & B_LOCKED)
532		bp->b_flags &= ~B_ERROR;
533
534	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
535	    (bp->b_bufsize <= 0)) {
536		bp->b_flags |= B_INVAL;
537		if (bp->b_flags & B_DELWRI)
538			--numdirtybuffers;
539		bp->b_flags &= ~(B_DELWRI | B_CACHE);
540		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
541			if (bp->b_bufsize)
542				allocbuf(bp, 0);
543			brelvp(bp);
544		}
545	}
546
547	/*
548	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
549	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
550	 * but the VM object is kept around.  The B_NOCACHE flag is used to
551	 * invalidate the pages in the VM object.
552	 *
553	 * If the buffer is a partially filled NFS buffer, keep it
554	 * since invalidating it now will lose information.  The valid
555	 * flags in the vm_pages have only DEV_BSIZE resolution but
556	 * the b_validoff, b_validend fields have byte resolution.
557	 * This can avoid unnecessary re-reads of the buffer.
558	 * XXX this seems to cause performance problems.
559	 */
560	if ((bp->b_flags & B_VMIO)
561	    && !(bp->b_vp->v_tag == VT_NFS &&
562		 bp->b_vp->v_type != VBLK &&
563		 (bp->b_flags & B_DELWRI) != 0)
564#ifdef notdef
565	    && (bp->b_vp->v_tag != VT_NFS
566		|| bp->b_vp->v_type == VBLK
567		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
568		|| bp->b_validend == 0
569		|| (bp->b_validoff == 0
570		    && bp->b_validend == bp->b_bufsize))
571#endif
572	    ) {
573		vm_ooffset_t foff;
574		vm_object_t obj;
575		int i, resid;
576		vm_page_t m;
577		struct vnode *vp;
578		int iototal = bp->b_bufsize;
579
580		vp = bp->b_vp;
581
582#if !defined(MAX_PERF)
583		if (!vp)
584			panic("brelse: missing vp");
585#endif
586
587		if (bp->b_npages) {
588			vm_pindex_t poff;
589			obj = (vm_object_t) vp->v_object;
590			if (vp->v_type == VBLK)
591				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
592			else
593				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
594			poff = OFF_TO_IDX(foff);
595			for (i = 0; i < bp->b_npages; i++) {
596				m = bp->b_pages[i];
597				if (m == bogus_page) {
598					m = vm_page_lookup(obj, poff + i);
599#if !defined(MAX_PERF)
600					if (!m) {
601						panic("brelse: page missing\n");
602					}
603#endif
604					bp->b_pages[i] = m;
605					pmap_qenter(trunc_page(bp->b_data),
606						bp->b_pages, bp->b_npages);
607				}
608				resid = IDX_TO_OFF(m->pindex+1) - foff;
609				if (resid > iototal)
610					resid = iototal;
611				if (resid > 0) {
612					/*
613					 * Don't invalidate the page if the local machine has already
614					 * modified it.  This is the lesser of two evils, and should
615					 * be fixed.
616					 */
617					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
618						vm_page_test_dirty(m);
619						if (m->dirty == 0) {
620							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
621							if (m->valid == 0)
622								vm_page_protect(m, VM_PROT_NONE);
623						}
624					}
625					if (resid >= PAGE_SIZE) {
626						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
627							bp->b_flags |= B_INVAL;
628						}
629					} else {
630						if (!vm_page_is_valid(m,
631							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
632							bp->b_flags |= B_INVAL;
633						}
634					}
635				}
636				foff += resid;
637				iototal -= resid;
638			}
639		}
640		if (bp->b_flags & (B_INVAL | B_RELBUF))
641			vfs_vmio_release(bp);
642	} else if (bp->b_flags & B_VMIO) {
643		if (bp->b_flags & (B_INVAL | B_RELBUF))
644			vfs_vmio_release(bp);
645	}
646
647#if !defined(MAX_PERF)
648	if (bp->b_qindex != QUEUE_NONE)
649		panic("brelse: free buffer onto another queue???");
650#endif
651
652	/* enqueue */
653	/* buffers with no memory */
654	if (bp->b_bufsize == 0) {
655		bp->b_flags |= B_INVAL;
656		bp->b_qindex = QUEUE_EMPTY;
657		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
658		LIST_REMOVE(bp, b_hash);
659		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
660		bp->b_dev = NODEV;
661		kvafreespace += bp->b_kvasize;
662		bp->b_generation++;
663
664	/* buffers with junk contents */
665	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
666		bp->b_flags |= B_INVAL;
667		bp->b_qindex = QUEUE_AGE;
668		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
669		LIST_REMOVE(bp, b_hash);
670		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
671		bp->b_dev = NODEV;
672		bp->b_generation++;
673
674	/* buffers that are locked */
675	} else if (bp->b_flags & B_LOCKED) {
676		bp->b_qindex = QUEUE_LOCKED;
677		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
678
679	/* buffers with stale but valid contents */
680	} else if (bp->b_flags & B_AGE) {
681		bp->b_qindex = QUEUE_AGE;
682		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
683
684	/* buffers with valid and quite potentially reusable contents */
685	} else {
686		bp->b_qindex = QUEUE_LRU;
687		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
688	}
689
690	if ((bp->b_flags & B_INVAL) ||
691		(bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
692		if (bp->b_flags & B_DELWRI) {
693			--numdirtybuffers;
694			bp->b_flags &= ~B_DELWRI;
695		}
696		vfs_bio_need_satisfy();
697	}
698
699	/* unlock */
700	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
701				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
702	splx(s);
703}
704
705/*
706 * Release a buffer.
707 */
708void
709bqrelse(struct buf * bp)
710{
711	int s;
712
713	s = splbio();
714
715	/* anyone need this block? */
716	if (bp->b_flags & B_WANTED) {
717		bp->b_flags &= ~(B_WANTED | B_AGE);
718		wakeup(bp);
719	}
720
721#if !defined(MAX_PERF)
722	if (bp->b_qindex != QUEUE_NONE)
723		panic("bqrelse: free buffer onto another queue???");
724#endif
725
726	if (bp->b_flags & B_LOCKED) {
727		bp->b_flags &= ~B_ERROR;
728		bp->b_qindex = QUEUE_LOCKED;
729		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
730		/* buffers with stale but valid contents */
731	} else {
732		bp->b_qindex = QUEUE_LRU;
733		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
734	}
735
736	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
737		vfs_bio_need_satisfy();
738	}
739
740	/* unlock */
741	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
742		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
743	splx(s);
744}
745
746static void
747vfs_vmio_release(bp)
748	struct buf *bp;
749{
750	int i;
751	vm_page_t m;
752
753	for (i = 0; i < bp->b_npages; i++) {
754		m = bp->b_pages[i];
755		bp->b_pages[i] = NULL;
756		vm_page_unwire(m);
757		/*
758		 * We don't mess with busy pages, it is
759		 * the responsibility of the process that
760		 * busied the pages to deal with them.
761		 */
762		if ((m->flags & PG_BUSY) || (m->busy != 0))
763			continue;
764
765		if (m->wire_count == 0) {
766
767			if (m->flags & PG_WANTED) {
768				m->flags &= ~PG_WANTED;
769				wakeup(m);
770			}
771
772			/*
773			 * If this is an async free, we cannot place
774			 * pages onto the cache queue, so we don't
775			 * modify any queues at all.
776			 * This is probably in error (for perf reasons),
777			 * and we will eventually need to build
778			 * a more complete infrastructure to support I/O
779			 * rundown.
780			 */
781			if ((bp->b_flags & B_ASYNC) == 0) {
782
783			/*
784			 * In the case of sync buffer frees, we can do pretty much
785			 * anything to any of the memory queues.  Specifically,
786			 * the cache queue is okay to be modified.
787			 */
788				if (m->valid) {
789					if(m->dirty == 0)
790						vm_page_test_dirty(m);
791					/*
792					 * this keeps pressure off of the process memory
793					 */
794					if (m->dirty == 0 && m->hold_count == 0)
795						vm_page_cache(m);
796					else
797						vm_page_deactivate(m);
798				} else if (m->hold_count == 0) {
799					m->flags |= PG_BUSY;
800					vm_page_protect(m, VM_PROT_NONE);
801					vm_page_free(m);
802				}
803			} else {
804				/*
805				 * If async, then at least we clear the
806				 * act_count.
807				 */
808				m->act_count = 0;
809			}
810		}
811	}
812	bufspace -= bp->b_bufsize;
813	vmiospace -= bp->b_bufsize;
814	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
815	bp->b_npages = 0;
816	bp->b_bufsize = 0;
817	bp->b_flags &= ~B_VMIO;
818	if (bp->b_vp)
819		brelvp(bp);
820}
821
822/*
823 * Check to see if a block is currently memory resident.
824 */
825struct buf *
826gbincore(struct vnode * vp, daddr_t blkno)
827{
828	struct buf *bp;
829	struct bufhashhdr *bh;
830
831	bh = BUFHASH(vp, blkno);
832	bp = bh->lh_first;
833
834	/* Search hash chain */
835	while (bp != NULL) {
836		/* hit */
837		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
838		    (bp->b_flags & B_INVAL) == 0) {
839			break;
840		}
841		bp = bp->b_hash.le_next;
842	}
843	return (bp);
844}
845
846/*
847 * this routine implements clustered async writes for
848 * clearing out B_DELWRI buffers...  This is much better
849 * than the old way of writing only one buffer at a time.
850 */
851int
852vfs_bio_awrite(struct buf * bp)
853{
854	int i;
855	daddr_t lblkno = bp->b_lblkno;
856	struct vnode *vp = bp->b_vp;
857	int s;
858	int ncl;
859	struct buf *bpa;
860	int nwritten;
861	int size;
862	int maxcl;
863
864	s = splbio();
865	/*
866	 * right now we support clustered writing only to regular files
867	 */
868	if ((vp->v_type == VREG) &&
869	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
870	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
871
872		size = vp->v_mount->mnt_stat.f_iosize;
873		maxcl = MAXPHYS / size;
874
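		/*
		 * The cluster is only extended while the next logical buffer
		 * is resident, delayed-write, B_CLUSTEROK, neither busy nor
		 * invalid, the same size as this one, already bmapped
		 * (b_blkno != b_lblkno) and physically contiguous on disk.
		 */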
875		for (i = 1; i < maxcl; i++) {
876			if ((bpa = gbincore(vp, lblkno + i)) &&
877			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
878			    (B_DELWRI | B_CLUSTEROK)) &&
879			    (bpa->b_bufsize == size)) {
880				if ((bpa->b_blkno == bpa->b_lblkno) ||
881				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
882					break;
883			} else {
884				break;
885			}
886		}
887		ncl = i;
888		/*
889		 * this is a possible cluster write
890		 */
891		if (ncl != 1) {
892			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
893			splx(s);
894			return nwritten;
895		}
896	}
897#if 0
898   	else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
899		((size = bp->b_bufsize) >= PAGE_SIZE)) {
900		maxcl = MAXPHYS / size;
901		for (i = 1; i < maxcl; i++) {
902			if ((bpa = gbincore(vp, lblkno + i)) &&
903			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
904			    (B_DELWRI | B_CLUSTEROK)) &&
905			    (bpa->b_bufsize == size)) {
906				    if (bpa->b_blkno !=
907						bp->b_blkno + ((i * size) >> DEV_BSHIFT))
908							break;
909			} else {
910				break;
911			}
912		}
913		ncl = i;
914		/*
915		 * this is a possible cluster write
916		 */
917		if (ncl != 1) {
918			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
919			splx(s);
920			return nwritten;
921		}
922	}
923#endif
924
925	bremfree(bp);
926	splx(s);
927	/*
928	 * default (old) behavior, writing out only one block
929	 */
930	bp->b_flags |= B_BUSY | B_ASYNC;
931	nwritten = bp->b_bufsize;
932	(void) VOP_BWRITE(bp);
933	return nwritten;
934}
935
936
937/*
938 * Find a buffer header which is available for use.
939 */
940static struct buf *
941getnewbuf(struct vnode *vp, daddr_t blkno,
942	int slpflag, int slptimeo, int size, int maxsize)
943{
944	struct buf *bp, *bp1;
945	int nbyteswritten = 0;
946	vm_offset_t addr;
947	static int writerecursion = 0;
948
949start:
950	if (bufspace >= maxbufspace)
951		goto trytofreespace;
952
953	/* can we constitute a new buffer? */
954	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
955#if !defined(MAX_PERF)
956		if (bp->b_qindex != QUEUE_EMPTY)
957			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
958			    bp->b_qindex);
959#endif
960		bp->b_flags |= B_BUSY;
961		bremfree(bp);
962		goto fillbuf;
963	}
964trytofreespace:
965	/*
966	 * We keep the file I/O from hogging metadata I/O
967	 * This is desirable because file data is cached in the
968	 * VM/Buffer cache even if a buffer is freed.
969	 */
970	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
971#if !defined(MAX_PERF)
972		if (bp->b_qindex != QUEUE_AGE)
973			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
974			    bp->b_qindex);
975#endif
976	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
977#if !defined(MAX_PERF)
978		if (bp->b_qindex != QUEUE_LRU)
979			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
980			    bp->b_qindex);
981#endif
982	}
983	if (!bp) {
984		/* wait for a free buffer of any kind */
985		needsbuffer |= VFS_BIO_NEED_ANY;
986		do
987			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
988			    slptimeo);
989		while (needsbuffer & VFS_BIO_NEED_ANY);
990		return (0);
991	}
992
993#if defined(DIAGNOSTIC)
994	if (bp->b_flags & B_BUSY) {
995		panic("getnewbuf: busy buffer on free list\n");
996	}
997#endif
998
999	/*
1000	 * We are fairly aggressive about freeing VMIO buffers, but since
1001	 * the buffering is intact without buffer headers, there is not
1002	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
1003	 */
1004	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
1005		if ((bp->b_flags & B_VMIO) == 0 ||
1006			(vmiospace < maxvmiobufspace)) {
1007			--bp->b_usecount;
1008			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1009			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
1010				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1011				goto start;
1012			}
1013			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1014		}
1015	}
1016
1017
1018	/* if we are a delayed write, convert to an async write */
1019	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1020
1021		/*
1022		 * If our delayed write is likely to be used soon, then
1023		 * recycle back onto the LRU queue.
1024		 */
1025		if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
1026			(bp->b_lblkno >= blkno) && (maxsize > 0)) {
1027
1028			if (bp->b_usecount > 0) {
1029				if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {
1030
1031					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1032
1033					if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
1034						TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1035						bp->b_usecount--;
1036						goto start;
1037					}
1038					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1039				}
1040			}
1041		}
1042
1043		/*
1044		 * Certain layered filesystems can recursively re-enter the vfs_bio
1045		 * code, due to delayed writes.  This helps keep the system from
1046		 * deadlocking.
1047		 */
1048		if (writerecursion > 0) {
1049			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1050			while (bp) {
1051				if ((bp->b_flags & B_DELWRI) == 0)
1052					break;
1053				bp = TAILQ_NEXT(bp, b_freelist);
1054			}
1055			if (bp == NULL) {
1056				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1057				while (bp) {
1058					if ((bp->b_flags & B_DELWRI) == 0)
1059						break;
1060					bp = TAILQ_NEXT(bp, b_freelist);
1061				}
1062			}
1063			if (bp == NULL)
1064				panic("getnewbuf: cannot get buffer, infinite recursion failure");
1065		} else {
1066			++writerecursion;
1067			nbyteswritten += vfs_bio_awrite(bp);
1068			--writerecursion;
1069			if (!slpflag && !slptimeo) {
1070				return (0);
1071			}
1072			goto start;
1073		}
1074	}
1075
1076	if (bp->b_flags & B_WANTED) {
1077		bp->b_flags &= ~B_WANTED;
1078		wakeup(bp);
1079	}
1080	bremfree(bp);
1081	bp->b_flags |= B_BUSY;
1082
1083	if (bp->b_flags & B_VMIO) {
1084		bp->b_flags &= ~B_ASYNC;
1085		vfs_vmio_release(bp);
1086	}
1087
1088	if (bp->b_vp)
1089		brelvp(bp);
1090
1091fillbuf:
1092	bp->b_generation++;
1093
1094	/* we are not free, nor do we contain interesting data */
1095	if (bp->b_rcred != NOCRED) {
1096		crfree(bp->b_rcred);
1097		bp->b_rcred = NOCRED;
1098	}
1099	if (bp->b_wcred != NOCRED) {
1100		crfree(bp->b_wcred);
1101		bp->b_wcred = NOCRED;
1102	}
1103
1104	LIST_REMOVE(bp, b_hash);
1105	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1106	if (bp->b_bufsize) {
1107		allocbuf(bp, 0);
1108	}
1109	bp->b_flags = B_BUSY;
1110	bp->b_dev = NODEV;
1111	bp->b_vp = NULL;
1112	bp->b_blkno = bp->b_lblkno = 0;
1113	bp->b_iodone = 0;
1114	bp->b_error = 0;
1115	bp->b_resid = 0;
1116	bp->b_bcount = 0;
1117	bp->b_npages = 0;
1118	bp->b_dirtyoff = bp->b_dirtyend = 0;
1119	bp->b_validoff = bp->b_validend = 0;
1120	bp->b_usecount = 5;
1121
1122	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1123
1124	/*
1125	 * we assume that buffer_map is not at address 0
1126	 */
1127	addr = 0;
1128	if (maxsize != bp->b_kvasize) {
1129		bfreekva(bp);
1130
1131findkvaspace:
1132		/*
1133		 * See if we have buffer kva space
1134		 */
1135		if (vm_map_findspace(buffer_map,
1136			vm_map_min(buffer_map), maxsize, &addr)) {
1137			if (kvafreespace > 0) {
1138				int totfree = 0, freed;
1139				do {
1140					freed = 0;
1141					for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1142						bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
1143						if (bp1->b_kvasize != 0) {
1144							totfree += bp1->b_kvasize;
1145							freed = bp1->b_kvasize;
1146							bremfree(bp1);
1147							bfreekva(bp1);
1148							brelse(bp1);
1149							break;
1150						}
1151					}
1152				} while (freed);
1153				/*
1154				 * if we found free space, then retry with the same buffer.
1155				 */
1156				if (totfree)
1157					goto findkvaspace;
1158			}
1159			bp->b_flags |= B_INVAL;
1160			brelse(bp);
1161			goto trytofreespace;
1162		}
1163	}
1164
1165	/*
1166	 * See if we have exceeded our allocated buffer space
1167	 */
1168	if (bufspace >= (maxbufspace + nbyteswritten)) {
1169		bp->b_flags |= B_INVAL;
1170		brelse(bp);
1171		goto trytofreespace;
1172	}
1173
1174	/*
1175	 * create a map entry for the buffer -- in essence
1176	 * reserving the kva space.
1177	 */
1178	if (addr) {
1179		vm_map_insert(buffer_map, NULL, 0,
1180			addr, addr + maxsize,
1181			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1182
1183		bp->b_kvabase = (caddr_t) addr;
1184		bp->b_kvasize = maxsize;
1185	}
1186	bp->b_data = bp->b_kvabase;
1187
1188	return (bp);
1189}
1190
1191static void
1192waitfreebuffers(int slpflag, int slptimeo) {
1193	while (numfreebuffers < hifreebuffers) {
1194		flushdirtybuffers(slpflag, slptimeo);
1195		if (numfreebuffers >= hifreebuffers)
1196			break;
1197		needsbuffer |= VFS_BIO_NEED_FREE;
1198		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
1199			break;
1200	}
1201}
1202
1203static void
1204flushdirtybuffers(int slpflag, int slptimeo) {
1205	int s;
1206	static pid_t flushing = 0;
1207
1208	s = splbio();
1209
1210	if (flushing) {
1211		if (flushing == curproc->p_pid) {
1212			splx(s);
1213			return;
1214		}
1215		while (flushing) {
1216			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
1217				splx(s);
1218				return;
1219			}
1220		}
1221	}
1222	flushing = curproc->p_pid;
1223
1224	while (numdirtybuffers > lodirtybuffers) {
1225		struct buf *bp;
1226		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
1227		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1228		if (bp == NULL)
1229			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1230
1231		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
1232			bp = TAILQ_NEXT(bp, b_freelist);
1233		}
1234
1235		if (bp) {
1236			vfs_bio_awrite(bp);
1237			continue;
1238		}
1239		break;
1240	}
1241
1242	flushing = 0;
1243	wakeup(&flushing);
1244	splx(s);
1245}
1246
1247/*
1248 * Check to see if a block is currently memory resident.
1249 */
1250struct buf *
1251incore(struct vnode * vp, daddr_t blkno)
1252{
1253	struct buf *bp;
1254
1255	int s = splbio();
1256	bp = gbincore(vp, blkno);
1257	splx(s);
1258	return (bp);
1259}
1260
1261/*
1262 * Returns true if no I/O is needed to access the
1263 * associated VM object.  This is like incore except
1264 * it also hunts around in the VM system for the data.
1265 */
1266
1267int
1268inmem(struct vnode * vp, daddr_t blkno)
1269{
1270	vm_object_t obj;
1271	vm_offset_t toff, tinc;
1272	vm_page_t m;
1273	vm_ooffset_t off;
1274
1275	if (incore(vp, blkno))
1276		return 1;
1277	if (vp->v_mount == NULL)
1278		return 0;
1279	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1280		return 0;
1281
1282	obj = vp->v_object;
1283	tinc = PAGE_SIZE;
1284	if (tinc > vp->v_mount->mnt_stat.f_iosize)
1285		tinc = vp->v_mount->mnt_stat.f_iosize;
1286	off = blkno * vp->v_mount->mnt_stat.f_iosize;
1287
1288	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1289
1290		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1291		if (!m)
1292			return 0;
1293		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
1294			return 0;
1295	}
1296	return 1;
1297}
1298
1299/*
1300 * now we set the dirty range for the buffer --
1301 * for NFS -- if the file is mapped and pages have
1302 * been written to, let it know.  We want the
1303 * entire range of the buffer to be marked dirty if
1304 * any of the pages have been written to, for consistency
1305 * with the b_validoff, b_validend set in the nfs write
1306 * code, and used by the nfs read code.
1307 */
1308static void
1309vfs_setdirty(struct buf *bp) {
1310	int i;
1311	vm_object_t object;
1312	vm_offset_t boffset, offset;
1313	/*
1314	 * We qualify the scan for modified pages on whether the
1315	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1316	 * is not cleared simply by protecting pages off.
1317	 */
1318	if ((bp->b_flags & B_VMIO) &&
1319		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
1320		/*
1321		 * test the pages to see if they have been modified directly
1322		 * by users through the VM system.
1323		 */
1324		for (i = 0; i < bp->b_npages; i++)
1325			vm_page_test_dirty(bp->b_pages[i]);
1326
1327		/*
1328		 * scan forwards for the first page modified
1329		 */
1330		for (i = 0; i < bp->b_npages; i++) {
1331			if (bp->b_pages[i]->dirty) {
1332				break;
1333			}
1334		}
1335		boffset = (i << PAGE_SHIFT);
1336		if (boffset < bp->b_dirtyoff) {
1337			bp->b_dirtyoff = boffset;
1338		}
1339
1340		/*
1341		 * scan backwards for the last page modified
1342		 */
1343		for (i = bp->b_npages - 1; i >= 0; --i) {
1344			if (bp->b_pages[i]->dirty) {
1345				break;
1346			}
1347		}
1348		boffset = (i + 1);
1349		offset = boffset + bp->b_pages[0]->pindex;
1350		if (offset >= object->size)
1351			boffset = object->size - bp->b_pages[0]->pindex;
1352		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
1353			bp->b_dirtyend = (boffset << PAGE_SHIFT);
1354	}
1355}
1356
1357/*
1358 * Get a block given a specified block and offset into a file/device.
1359 */
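/*
 * The buffer returned is marked B_BUSY and must eventually be released
 * with brelse(), bqrelse() or a write.  B_CACHE set in b_flags means the
 * contents are already valid; callers such as bread() use that to decide
 * whether a device read is needed.
 */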
1360struct buf *
1361getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1362{
1363	struct buf *bp;
1364	int s;
1365	struct bufhashhdr *bh;
1366	int maxsize;
1367	int generation;
1368
1369	if (vp->v_mount) {
1370		maxsize = vp->v_mount->mnt_stat.f_iosize;
1371		/*
1372		 * This happens on mount points.
1373		 */
1374		if (maxsize < size)
1375			maxsize = size;
1376	} else {
1377		maxsize = size;
1378	}
1379
1380#if !defined(MAX_PERF)
1381	if (size > MAXBSIZE)
1382		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1383#endif
1384
1385	s = splbio();
1386loop:
1387	if (numfreebuffers < lofreebuffers) {
1388		waitfreebuffers(slpflag, slptimeo);
1389	}
1390
1391	if ((bp = gbincore(vp, blkno))) {
1392loop1:
1393		generation = bp->b_generation;
1394		if (bp->b_flags & B_BUSY) {
1395			bp->b_flags |= B_WANTED;
1396			if (bp->b_usecount < BUF_MAXUSE)
1397				++bp->b_usecount;
1398			if (!tsleep(bp,
1399				(PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
1400				if (bp->b_generation != generation)
1401					goto loop;
1402				goto loop1;
1403			} else {
1404				splx(s);
1405				return (struct buf *) NULL;
1406			}
1407		}
1408		bp->b_flags |= B_BUSY | B_CACHE;
1409		bremfree(bp);
1410
1411		/*
1412		 * check for size inconsistencies (note that they shouldn't
1413		 * happen but do when filesystems don't handle the size changes
1414		 * correctly.) We are conservative on metadata and don't just
1415		 * extend the buffer but write and re-constitute it.
1416		 */
1417
1418		if (bp->b_bcount != size) {
1419			bp->b_generation++;
1420			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1421				allocbuf(bp, size);
1422			} else {
1423				bp->b_flags |= B_NOCACHE;
1424				VOP_BWRITE(bp);
1425				goto loop;
1426			}
1427		}
1428
1429		if (bp->b_usecount < BUF_MAXUSE)
1430			++bp->b_usecount;
1431		splx(s);
1432		return (bp);
1433	} else {
1434		vm_object_t obj;
1435
1436		if ((bp = getnewbuf(vp, blkno,
1437			slpflag, slptimeo, size, maxsize)) == 0) {
1438			if (slpflag || slptimeo) {
1439				splx(s);
1440				return NULL;
1441			}
1442			goto loop;
1443		}
1444
1445		/*
1446		 * This code is used to make sure that a buffer is not
1447		 * created while the getnewbuf routine is blocked.
1448		 * Normally the vnode is locked so this isn't a problem.
1449		 * VBLK type I/O requests, however, don't lock the vnode.
1450		 */
1451		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
1452			bp->b_flags |= B_INVAL;
1453			brelse(bp);
1454			goto loop;
1455		}
1456
1457		/*
1458		 * Insert the buffer into the hash, so that it can
1459		 * be found by incore.
1460		 */
1461		bp->b_blkno = bp->b_lblkno = blkno;
1462		bgetvp(vp, bp);
1463		LIST_REMOVE(bp, b_hash);
1464		bh = BUFHASH(vp, blkno);
1465		LIST_INSERT_HEAD(bh, bp, b_hash);
1466
1467		if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
1468			bp->b_flags |= (B_VMIO | B_CACHE);
1469#if defined(VFS_BIO_DEBUG)
1470			if (vp->v_type != VREG && vp->v_type != VBLK)
1471				printf("getblk: vmioing file type %d???\n", vp->v_type);
1472#endif
1473		} else {
1474			bp->b_flags &= ~B_VMIO;
1475		}
1476		splx(s);
1477
1478		allocbuf(bp, size);
1479#ifdef	PC98
1480		/*
1481		 * 1024byte/sector support
1482		 */
1483#define B_XXX2 0x8000000
1484		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
1485#endif
1486		return (bp);
1487	}
1488}
1489
1490/*
1491 * Get an empty, disassociated buffer of given size.
1492 */
1493struct buf *
1494geteblk(int size)
1495{
1496	struct buf *bp;
1497	int s;
1498
1499	s = splbio();
1500	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
1501	splx(s);
1502	allocbuf(bp, size);
1503	bp->b_flags |= B_INVAL;
1504	return (bp);
1505}
1506
1507
1508/*
1509 * This code constitutes the buffer memory from either anonymous system
1510 * memory (in the case of non-VMIO operations) or from an associated
1511 * VM object (in the case of VMIO operations).
1512 *
1513 * Note that this code is tricky, and has many complications to resolve
1514 * deadlock or inconsistent data situations.  Tread lightly!!!
1515 *
1516 * Modify the length of a buffer's underlying buffer storage without
1517 * destroying information (unless, of course the buffer is shrinking).
1518 */
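/*
 * Overview of the cases below: small non-VMIO buffers may be backed by
 * malloced memory (B_MALLOC), larger non-VMIO buffers by wired anonymous
 * pages via vm_hold_load_pages()/vm_hold_free_pages(), and VMIO buffers
 * by pages of the vnode's VM object.
 */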
1519int
1520allocbuf(struct buf * bp, int size)
1521{
1522
1523	int s;
1524	int newbsize, mbsize;
1525	int i;
1526
1527#if !defined(MAX_PERF)
1528	if (!(bp->b_flags & B_BUSY))
1529		panic("allocbuf: buffer not busy");
1530
1531	if (bp->b_kvasize < size)
1532		panic("allocbuf: buffer too small");
1533#endif
1534
1535	if ((bp->b_flags & B_VMIO) == 0) {
1536		caddr_t origbuf;
1537		int origbufsize;
1538		/*
1539		 * Just get anonymous memory from the kernel
1540		 */
1541		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1542#if !defined(NO_B_MALLOC)
1543		if (bp->b_flags & B_MALLOC)
1544			newbsize = mbsize;
1545		else
1546#endif
1547			newbsize = round_page(size);
1548
1549		if (newbsize < bp->b_bufsize) {
1550#if !defined(NO_B_MALLOC)
1551			/*
1552			 * malloced buffers are not shrunk
1553			 */
1554			if (bp->b_flags & B_MALLOC) {
1555				if (newbsize) {
1556					bp->b_bcount = size;
1557				} else {
1558					free(bp->b_data, M_BIOBUF);
1559					bufspace -= bp->b_bufsize;
1560					bufmallocspace -= bp->b_bufsize;
1561					bp->b_data = bp->b_kvabase;
1562					bp->b_bufsize = 0;
1563					bp->b_bcount = 0;
1564					bp->b_flags &= ~B_MALLOC;
1565				}
1566				return 1;
1567			}
1568#endif
1569			vm_hold_free_pages(
1570			    bp,
1571			    (vm_offset_t) bp->b_data + newbsize,
1572			    (vm_offset_t) bp->b_data + bp->b_bufsize);
1573		} else if (newbsize > bp->b_bufsize) {
1574#if !defined(NO_B_MALLOC)
1575			/*
1576			 * We only use malloced memory on the first allocation,
1577			 * and revert to page-allocated memory when the buffer grows.
1578			 */
1579			if ( (bufmallocspace < maxbufmallocspace) &&
1580				(bp->b_bufsize == 0) &&
1581				(mbsize <= PAGE_SIZE/2)) {
1582
1583				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
1584				bp->b_bufsize = mbsize;
1585				bp->b_bcount = size;
1586				bp->b_flags |= B_MALLOC;
1587				bufspace += mbsize;
1588				bufmallocspace += mbsize;
1589				return 1;
1590			}
1591#endif
1592			origbuf = NULL;
1593			origbufsize = 0;
1594#if !defined(NO_B_MALLOC)
1595			/*
1596			 * If the buffer is growing on its other-than-first allocation,
1597			 * then we revert to the page-allocation scheme.
1598			 */
1599			if (bp->b_flags & B_MALLOC) {
1600				origbuf = bp->b_data;
1601				origbufsize = bp->b_bufsize;
1602				bp->b_data = bp->b_kvabase;
1603				bufspace -= bp->b_bufsize;
1604				bufmallocspace -= bp->b_bufsize;
1605				bp->b_bufsize = 0;
1606				bp->b_flags &= ~B_MALLOC;
1607				newbsize = round_page(newbsize);
1608			}
1609#endif
1610			vm_hold_load_pages(
1611			    bp,
1612			    (vm_offset_t) bp->b_data + bp->b_bufsize,
1613			    (vm_offset_t) bp->b_data + newbsize);
1614#if !defined(NO_B_MALLOC)
1615			if (origbuf) {
1616				bcopy(origbuf, bp->b_data, origbufsize);
1617				free(origbuf, M_BIOBUF);
1618			}
1619#endif
1620		}
1621	} else {
1622		vm_page_t m;
1623		int desiredpages;
1624
1625		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1626		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);
1627
1628#if !defined(NO_B_MALLOC)
1629		if (bp->b_flags & B_MALLOC)
1630			panic("allocbuf: VMIO buffer can't be malloced");
1631#endif
1632
1633		if (newbsize < bp->b_bufsize) {
1634			if (desiredpages < bp->b_npages) {
1635				for (i = desiredpages; i < bp->b_npages; i++) {
1636					/*
1637					 * the page is not freed here -- it
1638					 * is the responsibility of vnode_pager_setsize
1639					 */
1640					m = bp->b_pages[i];
1641#if defined(DIAGNOSTIC)
1642					if (m == bogus_page)
1643						panic("allocbuf: bogus page found");
1644#endif
1645					s = splvm();
1646					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
1647						m->flags |= PG_WANTED;
1648						tsleep(m, PVM, "biodep", 0);
1649					}
1650					splx(s);
1651
1652					bp->b_pages[i] = NULL;
1653					vm_page_unwire(m);
1654				}
1655				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
1656				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
1657				bp->b_npages = desiredpages;
1658			}
1659		} else if (newbsize > bp->b_bufsize) {
1660			vm_object_t obj;
1661			vm_offset_t tinc, toff;
1662			vm_ooffset_t off;
1663			vm_pindex_t objoff;
1664			int pageindex, curbpnpages;
1665			struct vnode *vp;
1666			int bsize;
1667
1668			vp = bp->b_vp;
1669
1670			if (vp->v_type == VBLK)
1671				bsize = DEV_BSIZE;
1672			else
1673				bsize = vp->v_mount->mnt_stat.f_iosize;
1674
1675			if (bp->b_npages < desiredpages) {
1676				obj = vp->v_object;
1677				tinc = PAGE_SIZE;
1678				if (tinc > bsize)
1679					tinc = bsize;
1680				off = (vm_ooffset_t) bp->b_lblkno * bsize;
1681				curbpnpages = bp->b_npages;
1682		doretry:
1683				bp->b_flags |= B_CACHE;
1684				bp->b_validoff = bp->b_validend = 0;
1685				for (toff = 0; toff < newbsize; toff += tinc) {
1686					int bytesinpage;
1687
1688					pageindex = toff >> PAGE_SHIFT;
1689					objoff = OFF_TO_IDX(off + toff);
1690					if (pageindex < curbpnpages) {
1691
1692						m = bp->b_pages[pageindex];
1693#ifdef VFS_BIO_DIAG
1694						if (m->pindex != objoff)
1695							panic("allocbuf: page changed offset??!!!?");
1696#endif
1697						bytesinpage = tinc;
1698						if (tinc > (newbsize - toff))
1699							bytesinpage = newbsize - toff;
1700						if (bp->b_flags & B_CACHE)
1701							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1702						continue;
1703					}
1704					m = vm_page_lookup(obj, objoff);
1705					if (!m) {
1706						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1707						if (!m) {
1708							VM_WAIT;
1709							vm_pageout_deficit += (desiredpages - bp->b_npages);
1710							goto doretry;
1711						}
1712						/*
1713						 * Normally it is unwise to clear PG_BUSY without
1714						 * PAGE_WAKEUP -- but it is okay here, as there is
1715						 * no chance for blocking between here and vm_page_alloc
1716						 */
1717						m->flags &= ~PG_BUSY;
1718						vm_page_wire(m);
1719						bp->b_flags &= ~B_CACHE;
1720					} else if (m->flags & PG_BUSY) {
1721						s = splvm();
1722						if (m->flags & PG_BUSY) {
1723							m->flags |= PG_WANTED;
1724							tsleep(m, PVM, "pgtblk", 0);
1725						}
1726						splx(s);
1727						goto doretry;
1728					} else {
1729						if ((curproc != pageproc) &&
1730							((m->queue - m->pc) == PQ_CACHE) &&
1731						    ((cnt.v_free_count + cnt.v_cache_count) <
1732								(cnt.v_free_min + cnt.v_cache_min))) {
1733							pagedaemon_wakeup();
1734						}
1735						bytesinpage = tinc;
1736						if (tinc > (newbsize - toff))
1737							bytesinpage = newbsize - toff;
1738						if (bp->b_flags & B_CACHE)
1739							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1740						vm_page_wire(m);
1741					}
1742					bp->b_pages[pageindex] = m;
1743					curbpnpages = pageindex + 1;
1744				}
1745				if (vp->v_tag == VT_NFS &&
1746				    vp->v_type != VBLK) {
1747					if (bp->b_dirtyend > 0) {
1748						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
1749						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
1750					}
1751					if (bp->b_validend == 0)
1752						bp->b_flags &= ~B_CACHE;
1753				}
1754				bp->b_data = (caddr_t) trunc_page(bp->b_data);
1755				bp->b_npages = curbpnpages;
1756				pmap_qenter((vm_offset_t) bp->b_data,
1757					bp->b_pages, bp->b_npages);
1758				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
1759			}
1760		}
1761	}
1762	if (bp->b_flags & B_VMIO)
1763		vmiospace += (newbsize - bp->b_bufsize);
1764	bufspace += (newbsize - bp->b_bufsize);
1765	bp->b_bufsize = newbsize;
1766	bp->b_bcount = size;
1767	return 1;
1768}
1769
1770/*
1771 * Wait for buffer I/O completion, returning error status.
1772 */
1773int
1774biowait(register struct buf * bp)
1775{
1776	int s;
1777
1778	s = splbio();
1779	while ((bp->b_flags & B_DONE) == 0)
1780#if defined(NO_SCHEDULE_MODS)
1781		tsleep(bp, PRIBIO, "biowait", 0);
1782#else
1783		if (bp->b_flags & B_READ)
1784			tsleep(bp, PRIBIO, "biord", 0);
1785		else
1786			tsleep(bp, curproc->p_usrpri, "biowr", 0);
1787#endif
1788	splx(s);
1789	if (bp->b_flags & B_EINTR) {
1790		bp->b_flags &= ~B_EINTR;
1791		return (EINTR);
1792	}
1793	if (bp->b_flags & B_ERROR) {
1794		return (bp->b_error ? bp->b_error : EIO);
1795	} else {
1796		return (0);
1797	}
1798}
1799
1800/*
1801 * Finish I/O on a buffer, calling an optional function.
1802 * This is usually called from interrupt level, so process blocking
1803 * is not *a good idea*.
1804 */
1805void
1806biodone(register struct buf * bp)
1807{
1808	int s;
1809
1810	s = splbio();
1811
1812#if !defined(MAX_PERF)
1813	if (!(bp->b_flags & B_BUSY))
1814		panic("biodone: buffer not busy");
1815#endif
1816
1817	if (bp->b_flags & B_DONE) {
1818		splx(s);
1819#if !defined(MAX_PERF)
1820		printf("biodone: buffer already done\n");
1821#endif
1822		return;
1823	}
1824	bp->b_flags |= B_DONE;
1825
1826	if ((bp->b_flags & B_READ) == 0) {
1827		vwakeup(bp);
1828	}
1829#ifdef BOUNCE_BUFFERS
1830	if (bp->b_flags & B_BOUNCE)
1831		vm_bounce_free(bp);
1832#endif
1833
1834	/* call optional completion function if requested */
1835	if (bp->b_flags & B_CALL) {
1836		bp->b_flags &= ~B_CALL;
1837		(*bp->b_iodone) (bp);
1838		splx(s);
1839		return;
1840	}
1841	if (bp->b_flags & B_VMIO) {
1842		int i, resid;
1843		vm_ooffset_t foff;
1844		vm_page_t m;
1845		vm_object_t obj;
1846		int iosize;
1847		struct vnode *vp = bp->b_vp;
1848
1849		obj = vp->v_object;
1850
1851#if defined(VFS_BIO_DEBUG)
1852		if (vp->v_usecount == 0) {
1853			panic("biodone: zero vnode ref count");
1854		}
1855
1856		if (vp->v_object == NULL) {
1857			panic("biodone: missing VM object");
1858		}
1859
1860		if ((vp->v_flag & VOBJBUF) == 0) {
1861			panic("biodone: vnode is not setup for merged cache");
1862		}
1863#endif
1864
1865		if (vp->v_type == VBLK)
1866			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1867		else
1868			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1869#if !defined(MAX_PERF)
1870		if (!obj) {
1871			panic("biodone: no object");
1872		}
1873#endif
1874#if defined(VFS_BIO_DEBUG)
1875		if (obj->paging_in_progress < bp->b_npages) {
1876			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1877			    obj->paging_in_progress, bp->b_npages);
1878		}
1879#endif
1880		iosize = bp->b_bufsize;
1881		for (i = 0; i < bp->b_npages; i++) {
1882			int bogusflag = 0;
1883			m = bp->b_pages[i];
1884			if (m == bogus_page) {
1885				bogusflag = 1;
1886				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
1887				if (!m) {
1888#if defined(VFS_BIO_DEBUG)
1889					printf("biodone: page disappeared\n");
1890#endif
1891					--obj->paging_in_progress;
1892					continue;
1893				}
1894				bp->b_pages[i] = m;
1895				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1896			}
1897#if defined(VFS_BIO_DEBUG)
1898			if (OFF_TO_IDX(foff) != m->pindex) {
1899				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
1900			}
1901#endif
1902			resid = IDX_TO_OFF(m->pindex + 1) - foff;
1903			if (resid > iosize)
1904				resid = iosize;
1905			/*
1906			 * In the write case, the valid and clean bits are
1907			 * already changed correctly, so we only need to do this
1908			 * here in the read case.
1909			 */
1910			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1911				vfs_page_set_valid(bp, foff, i, m);
1912			}
1913
1914			/*
1915			 * when debugging new filesystems or buffer I/O methods, this
1916			 * is the most common error that pops up.  if you see this, you
1917			 * have not set the page busy flag correctly!!!
1918			 */
1919			if (m->busy == 0) {
1920#if !defined(MAX_PERF)
1921				printf("biodone: page busy < 0, "
1922				    "pindex: %d, foff: 0x(%x,%x), "
1923				    "resid: %d, index: %d\n",
1924				    (int) m->pindex, (int)(foff >> 32),
1925						(int) foff & 0xffffffff, resid, i);
1926#endif
1927				if (vp->v_type != VBLK)
1928#if !defined(MAX_PERF)
1929					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
1930					    bp->b_vp->v_mount->mnt_stat.f_iosize,
1931					    (int) bp->b_lblkno,
1932					    bp->b_flags, bp->b_npages);
1933				else
1934					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
1935					    (int) bp->b_lblkno,
1936					    bp->b_flags, bp->b_npages);
1937				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
1938				    m->valid, m->dirty, m->wire_count);
1939#endif
1940				panic("biodone: page busy < 0\n");
1941			}
1942			--m->busy;
1943			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1944				m->flags &= ~PG_WANTED;
1945				wakeup(m);
1946			}
1947			--obj->paging_in_progress;
1948			foff += resid;
1949			iosize -= resid;
1950		}
1951		if (obj && obj->paging_in_progress == 0 &&
1952		    (obj->flags & OBJ_PIPWNT)) {
1953			obj->flags &= ~OBJ_PIPWNT;
1954			wakeup(obj);
1955		}
1956	}
1957	/*
1958	 * For asynchronous completions, release the buffer now. The brelse
1959	 * checks for B_WANTED and will do the wakeup there if necessary - so
1960	 * no need to do a wakeup here in the async case.
1961	 */
1962
1963	if (bp->b_flags & B_ASYNC) {
1964		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
1965			brelse(bp);
1966		else
1967			bqrelse(bp);
1968	} else {
1969		bp->b_flags &= ~B_WANTED;
1970		wakeup(bp);
1971	}
1972	splx(s);
1973}
1974
1975int
1976count_lock_queue()
1977{
1978	int count;
1979	struct buf *bp;
1980
1981	count = 0;
1982	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
1983	    bp != NULL;
1984	    bp = TAILQ_NEXT(bp, b_freelist))
1985		count++;
1986	return (count);
1987}
1988
1989int vfs_update_interval = 30;
1990
1991static void
1992vfs_update()
1993{
1994	while (1) {
1995		tsleep(&vfs_update_wakeup, PUSER, "update",
1996		    hz * vfs_update_interval);
1997		vfs_update_wakeup = 0;
1998		sync(curproc, NULL);
1999	}
2000}
2001
2002static int
2003sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
2004{
2005	int error = sysctl_handle_int(oidp,
2006		oidp->oid_arg1, oidp->oid_arg2, req);
2007	if (!error)
2008		wakeup(&vfs_update_wakeup);
2009	return error;
2010}
2011
2012SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
2013	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
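/*
 * Setting the kern.update sysctl changes the flush interval and, via the
 * wakeup in the handler above, kicks the update daemon immediately so
 * the new interval takes effect right away.
 */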
2014
2015
2016/*
2017 * This routine is called in lieu of iodone in the case of
2018 * incomplete I/O.  This keeps the busy status for pages
2019 * consistent.
2020 */
2021void
2022vfs_unbusy_pages(struct buf * bp)
2023{
2024	int i;
2025
2026	if (bp->b_flags & B_VMIO) {
2027		struct vnode *vp = bp->b_vp;
2028		vm_object_t obj = vp->v_object;
2029		vm_ooffset_t foff;
2030
2031		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2032
2033		for (i = 0; i < bp->b_npages; i++) {
2034			vm_page_t m = bp->b_pages[i];
2035
2036			if (m == bogus_page) {
2037				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
2038#if !defined(MAX_PERF)
2039				if (!m) {
2040					panic("vfs_unbusy_pages: page missing\n");
2041				}
2042#endif
2043				bp->b_pages[i] = m;
2044				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2045			}
2046			--obj->paging_in_progress;
2047			--m->busy;
2048			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
2049				m->flags &= ~PG_WANTED;
2050				wakeup(m);
2051			}
2052		}
2053		if (obj->paging_in_progress == 0 &&
2054		    (obj->flags & OBJ_PIPWNT)) {
2055			obj->flags &= ~OBJ_PIPWNT;
2056			wakeup(obj);
2057		}
2058	}
2059}
2060
2061/*
2062 * Set NFS' b_validoff and b_validend fields from the valid bits
2063 * of a page.  If the consumer is not NFS, and the page is not
2064 * valid for the entire range, clear the B_CACHE flag to force
2065 * the consumer to re-read the page.
2066 */
2067static void
2068vfs_buf_set_valid(struct buf *bp,
2069		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
2070		  vm_page_t m)
2071{
2072	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
2073		vm_offset_t svalid, evalid;
2074		int validbits = m->valid;
2075
2076		/*
2077		 * This only bothers with the first valid range in the
2078		 * page.
2079		 */
2080		svalid = off;
2081		while (validbits && !(validbits & 1)) {
2082			svalid += DEV_BSIZE;
2083			validbits >>= 1;
2084		}
2085		evalid = svalid;
2086		while (validbits & 1) {
2087			evalid += DEV_BSIZE;
2088			validbits >>= 1;
2089		}
2090		/*
2091		 * Make sure this range is contiguous with the range
2092		 * built up from previous pages.  If not, then we will
2093		 * just use the range from the previous pages.
2094		 */
2095		if (svalid == bp->b_validend) {
2096			bp->b_validoff = min(bp->b_validoff, svalid);
2097			bp->b_validend = max(bp->b_validend, evalid);
2098		}
2099	} else if (!vm_page_is_valid(m,
2100				     (vm_offset_t) ((foff + off) & PAGE_MASK),
2101				     size)) {
2102		bp->b_flags &= ~B_CACHE;
2103	}
2104}
2105
2106/*
2107 * Set the valid bits in a page, taking care of the b_validoff,
2108 * b_validend fields which NFS uses to optimise small reads.  Off is
2109 * the offset within the file and pageno is the page index within the buf.
2110 */
2111static void
2112vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2113{
2114	struct vnode *vp = bp->b_vp;
2115	vm_ooffset_t soff, eoff;
2116
2117	soff = off;
2118	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
2119	vm_page_set_invalid(m,
2120			    (vm_offset_t) (soff & PAGE_MASK),
2121			    (vm_offset_t) (eoff - soff));
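	/*
	 * For NFS buffers, further restrict the range to the portion the
	 * buffer itself considers valid (b_validoff .. b_validend), rounded
	 * to DEV_BSIZE boundaries and translated to this page's offsets.
	 */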
2122	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
2123		vm_ooffset_t sv, ev;
2124		off = off - pageno * PAGE_SIZE;
2125		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2126		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
2127		soff = max(sv, soff);
2128		eoff = min(ev, eoff);
2129	}
2130	if (eoff > soff)
2131		vm_page_set_validclean(m,
2132				       (vm_offset_t) (soff & PAGE_MASK),
2133				       (vm_offset_t) (eoff - soff));
2134}
2135
2136/*
2137 * This routine is called before a device strategy routine.
2138 * It is used to tell the VM system that paging I/O is in
2139 * progress, and to treat the pages associated with the buffer
2140 * almost as being PG_BUSY.  The object's paging_in_progress
2141 * count is also maintained so that the object doesn't become
2142 * inconsistent.
2143 */
2144void
2145vfs_busy_pages(struct buf * bp, int clear_modify)
2146{
2147	int i,s;
2148
2149	if (bp->b_flags & B_VMIO) {
2150		struct vnode *vp = bp->b_vp;
2151		vm_object_t obj = vp->v_object;
2152		vm_ooffset_t foff;
2153
2154		if (vp->v_type == VBLK)
2155			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2156		else
2157			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2158
2159		vfs_setdirty(bp);
2160
2161retry:
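		/*
		 * Wait for any of the pages to come un-busy before marking
		 * the whole set busy; the scan restarts from the top after
		 * each sleep.
		 */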
2162		for (i = 0; i < bp->b_npages; i++) {
2163			vm_page_t m = bp->b_pages[i];
2164
2165			if (m && (m->flags & PG_BUSY)) {
2166				s = splvm();
2167				while (m->flags & PG_BUSY) {
2168					m->flags |= PG_WANTED;
2169					tsleep(m, PVM, "vbpage", 0);
2170				}
2171				splx(s);
2172				goto retry;
2173			}
2174		}
2175
2176		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2177			vm_page_t m = bp->b_pages[i];
2178
2179			if ((bp->b_flags & B_CLUSTER) == 0) {
2180				obj->paging_in_progress++;
2181				m->busy++;
2182			}
2183
2184			vm_page_protect(m, VM_PROT_NONE);
2185			if (clear_modify)
2186				vfs_page_set_valid(bp, foff, i, m);
2187			else if (bp->b_bcount >= PAGE_SIZE) {
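				/*
				 * A read is about to be issued (B_CACHE is
				 * clear) but this page already holds valid
				 * data; substitute bogus_page so the read
				 * cannot overwrite it.  The real page is
				 * restored before the pages are unbusied
				 * (see vfs_unbusy_pages()).
				 */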
2188				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
2189					bp->b_pages[i] = bogus_page;
2190					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2191				}
2192			}
2193		}
2194	}
2195}
2196
2197/*
2198 * Tell the VM system that the pages associated with this buffer
2199 * are clean.  This is used for delayed writes where the data is
2200 * going to go to disk eventually without additional VM intervention.
2201 */
2202void
2203vfs_clean_pages(struct buf * bp)
2204{
2205	int i;
2206
2207	if (bp->b_flags & B_VMIO) {
2208		struct vnode *vp = bp->b_vp;
2209		vm_ooffset_t foff;
2210
2211		if (vp->v_type == VBLK)
2212			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2213		else
2214			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2215		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2216			vm_page_t m = bp->b_pages[i];
2217
2218			vfs_page_set_valid(bp, foff, i, m);
2219		}
2220	}
2221}
2222
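/*
 * Zero the portions of a VMIO buffer that are not backed by valid page
 * data, leaving already-valid regions untouched; non-VMIO buffers are
 * simply cleared with clrbuf().
 */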
2223void
2224vfs_bio_clrbuf(struct buf *bp) {
2225	int i;
2226	if (bp->b_flags & B_VMIO) {
2227		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
2228			int mask;
2229			mask = 0;
2230			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
2231				mask |= (1 << (i / DEV_BSIZE));
2232			if (bp->b_pages[0]->valid != mask) {
2233				bzero(bp->b_data, bp->b_bufsize);
2234			}
2235			bp->b_pages[0]->valid = mask;
2236			bp->b_resid = 0;
2237			return;
2238		}
2239		for (i = 0; i < bp->b_npages; i++) {
2240			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
2241				continue;
2242			if (bp->b_pages[i]->valid == 0) {
2243				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2244					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
2245				}
2246			} else {
2247				int j;
2248				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
2249					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
2250						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2251				}
2252			}
2253			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
2254		}
2255		bp->b_resid = 0;
2256	} else {
2257		clrbuf(bp);
2258	}
2259}
2260
2261/*
2262 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
2263 * a buffer's address space.  The pages are anonymous and are
2264 * not associated with a file object.
2265 */
2266static void
2267vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2268{
2269	vm_offset_t pg;
2270	vm_page_t p;
2271	int index;
2272
2273	to = round_page(to);
2274	from = round_page(from);
2275	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2276
2277	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2278
2279tryagain:
2280
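		/*
		 * Allocate an anonymous page from the kernel object to back
		 * this piece of the buffer's kva; if no pages are free, note
		 * the deficit and wait for the pageout daemon, then retry.
		 */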
2281		p = vm_page_alloc(kernel_object,
2282			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2283		    VM_ALLOC_NORMAL);
2284		if (!p) {
2285			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
2286			VM_WAIT;
2287			goto tryagain;
2288		}
2289		vm_page_wire(p);
2290		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2291		bp->b_pages[index] = p;
2292		PAGE_WAKEUP(p);
2293	}
2294	bp->b_npages = index;
2295}
2296
2297static void
2298vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2299{
2300	vm_offset_t pg;
2301	vm_page_t p;
2302	int index, newnpages;
2303
2304	from = round_page(from);
2305	to = round_page(to);
2306	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2307
2308	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2309		p = bp->b_pages[index];
2310		if (p && (index < bp->b_npages)) {
2311#if !defined(MAX_PERF)
2312			if (p->busy) {
2313				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2314					bp->b_blkno, bp->b_lblkno);
2315			}
2316#endif
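			/*
			 * Unmap the page from the buffer's kva, then unwire
			 * and free the anonymous page.
			 */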
2317			bp->b_pages[index] = NULL;
2318			pmap_kremove(pg);
2319			p->flags |= PG_BUSY;
2320			vm_page_unwire(p);
2321			vm_page_free(p);
2322		}
2323	}
2324	bp->b_npages = newnpages;
2325}
2326
2327
2328#include "opt_ddb.h"
2329#ifdef DDB
2330#include <ddb/ddb.h>
2331
2332DB_SHOW_COMMAND(buffer, db_show_buffer)
2333{
2334	/* get args */
2335	struct buf *bp = (struct buf *)addr;
2336
2337	if (!have_addr) {
2338		db_printf("usage: show buffer <addr>\n");
2339		return;
2340	}
2341
2342	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
2343		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
2344		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
2345		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
2346		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
2347		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
2348	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
2349		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
2350		  "b_blkno = %d, b_pblkno = %d\n",
2351		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
2352		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
2353	if (bp->b_npages) {
2354		int i;
2355		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
2356		for (i = 0; i < bp->b_npages; i++) {
2357			vm_page_t m;
2358			m = bp->b_pages[i];
2359			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
2360				VM_PAGE_TO_PHYS(m));
2361			if ((i + 1) < bp->b_npages)
2362				db_printf(",");
2363		}
2364		db_printf("\n");
2365	}
2366}
2367#endif /* DDB */
2368