vfs_bio.c revision 48225
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 *		John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.215 1999/06/22 01:39:53 mckusick Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme.  Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *
23 * Author:  John S. Dyson
24 * Significant help during the development and debugging phases
25 * had been provided by David Greenman, also of the FreeBSD core team.
26 *
27 * see man buf(9) for more info.
28 */
29
30#define VMIO
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/sysproto.h>
34#include <sys/kernel.h>
35#include <sys/sysctl.h>
36#include <sys/proc.h>
37#include <sys/vnode.h>
38#include <sys/vmmeter.h>
39#include <sys/lock.h>
40#include <miscfs/specfs/specdev.h>
41#include <vm/vm.h>
42#include <vm/vm_param.h>
43#include <vm/vm_prot.h>
44#include <vm/vm_kern.h>
45#include <vm/vm_pageout.h>
46#include <vm/vm_page.h>
47#include <vm/vm_object.h>
48#include <vm/vm_extern.h>
49#include <vm/vm_map.h>
50#include <sys/buf.h>
51#include <sys/mount.h>
52#include <sys/malloc.h>
53#include <sys/resourcevar.h>
54
55static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
56
57struct	bio_ops bioops;		/* I/O operation notification */
58
59#if 0 	/* replaced by sched_sync */
60static void vfs_update __P((void));
61static struct	proc *updateproc;
62static struct kproc_desc up_kp = {
63	"update",
64	vfs_update,
65	&updateproc
66};
67SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
68#endif
69
70struct buf *buf;		/* buffer header pool */
71struct swqueue bswlist;
72
73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74		vm_offset_t to);
75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76		vm_offset_t to);
77static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
78			       int pageno, vm_page_t m);
79static void vfs_clean_pages(struct buf * bp);
80static void vfs_setdirty(struct buf *bp);
81static void vfs_vmio_release(struct buf *bp);
82static void flushdirtybuffers(int slpflag, int slptimeo);
83static int flushbufqueues(void);
84
85/*
86 * Internal update daemon, process 3
87 *	The variable vfs_update_wakeup allows for internal syncs.
88 */
89int vfs_update_wakeup;
90
91/*
92 * bogus page -- for I/O to/from partially complete buffers
93 * This is a temporary solution to the problem, but it is not
94 * really that bad.  It would be better to split the buffer
95 * for input in the case of buffers partially already in memory,
96 * but the code is intricate enough already.
97 */
98vm_page_t bogus_page;
99int runningbufspace;
100static vm_offset_t bogus_offset;
101
102static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
103	bufmallocspace, maxbufmallocspace, hibufspace;
104static int needsbuffer;
105static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
106static int numfreebuffers, lofreebuffers, hifreebuffers;
107static int kvafreespace;
108
109SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
110	&numdirtybuffers, 0, "");
111SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
112	&lodirtybuffers, 0, "");
113SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
114	&hidirtybuffers, 0, "");
115SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
116	&numfreebuffers, 0, "");
117SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
118	&lofreebuffers, 0, "");
119SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
120	&hifreebuffers, 0, "");
121SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
122	&runningbufspace, 0, "");
123SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
124	&maxbufspace, 0, "");
125SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
126	&hibufspace, 0, "");
127SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
128	&bufspace, 0, "");
129SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
130	&maxvmiobufspace, 0, "");
131SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
132	&vmiospace, 0, "");
133SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
134	&maxbufmallocspace, 0, "");
135SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
136	&bufmallocspace, 0, "");
137SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
138	&kvafreespace, 0, "");
139
140static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
141struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
142char *buf_wmesg = BUF_WMESG;
143
144extern int vm_swap_size;
145
146#define BUF_MAXUSE		24
147
148#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
149#define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
150#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
151#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
152#define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */
153
154/*
155 *	kvaspacewakeup:
156 *
157 *	Called when kva space is potentially available for recovery or when
158 *	kva space is recovered in the buffer_map.  This function wakes up
159 *	anyone waiting for buffer_map kva space.  Even though the buffer_map
160 *	is larger than maxbufspace, this situation will typically occur
161 *	when the buffer_map gets fragmented.
162 */
163
164static __inline void
165kvaspacewakeup(void)
166{
167	/*
168	 * If someone is waiting for KVA space, wake them up.  Even
169	 * though we haven't freed the kva space yet, the waiting
170	 * process will be able to now.
171	 */
172	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
173		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
174		wakeup(&needsbuffer);
175	}
176}
177
178/*
179 *	bufspacewakeup:
180 *
181 *	Called when buffer space is potentially available for recovery or when
182 *	buffer space is recovered.  getnewbuf() will block on this flag when
183 *	it is unable to free sufficient buffer space.  Buffer space becomes
184 *	recoverable when bp's get placed back in the queues.
185 */
186
187static __inline void
188bufspacewakeup(void)
189{
190	/*
191	 * If someone is waiting for BUF space, wake them up.  Even
192	 * though we haven't freed the buffer space yet, the waiting
193	 * process will be able to now.
194	 */
195	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
196		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
197		wakeup(&needsbuffer);
198	}
199}
200
201/*
202 *	bufcountwakeup:
203 *
204 *	Called when a buffer has been added to one of the free queues to
205 *	account for the buffer and to wakeup anyone waiting for free buffers.
206 *	This typically occurs when large amounts of metadata are being handled
207 *	by the buffer cache ( else buffer space runs out first, usually ).
208 */
209
210static __inline void
211bufcountwakeup(void)
212{
213	++numfreebuffers;
214	if (needsbuffer) {
215		needsbuffer &= ~VFS_BIO_NEED_ANY;
216		if (numfreebuffers >= hifreebuffers)
217			needsbuffer &= ~VFS_BIO_NEED_FREE;
218		wakeup(&needsbuffer);
219	}
220}
221
222/*
223 *	vfs_buf_test_cache:
224 *
225 *	Called when a buffer is extended.  This function clears the B_CACHE
226 *	bit if the newly extended portion of the buffer does not contain
227 *	valid data.
228 */
229static __inline__
230void
231vfs_buf_test_cache(struct buf *bp,
232		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
233		  vm_page_t m)
234{
235	if (bp->b_flags & B_CACHE) {
236		int base = (foff + off) & PAGE_MASK;
237		if (vm_page_is_valid(m, base, size) == 0)
238			bp->b_flags &= ~B_CACHE;
239	}
240}
241
242
243/*
244 * Initialize buffer headers and related structures.
245 */
246void
247bufinit()
248{
249	struct buf *bp;
250	int i;
251
252	TAILQ_INIT(&bswlist);
253	LIST_INIT(&invalhash);
254	simple_lock_init(&buftimelock);
255
256	/* first, make a null hash table */
257	for (i = 0; i < BUFHSZ; i++)
258		LIST_INIT(&bufhashtbl[i]);
259
260	/* next, make a null set of free lists */
261	for (i = 0; i < BUFFER_QUEUES; i++)
262		TAILQ_INIT(&bufqueues[i]);
263
264	/* finally, initialize each buffer header and stick on empty q */
265	for (i = 0; i < nbuf; i++) {
266		bp = &buf[i];
267		bzero(bp, sizeof *bp);
268		bp->b_flags = B_INVAL;	/* we're just an empty header */
269		bp->b_dev = NODEV;
270		bp->b_rcred = NOCRED;
271		bp->b_wcred = NOCRED;
272		bp->b_qindex = QUEUE_EMPTY;
273		bp->b_xflags = 0;
274		LIST_INIT(&bp->b_dep);
275		BUF_LOCKINIT(bp);
276		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
277		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
278	}
279
280	/*
281	 * maxbufspace is currently calculated assuming all filesystem
282	 * blocks are 8K.  If you happen to use a 16K filesystem, the size
283	 * of the buffer cache is still the same as it would be for 8K
284	 * filesystems.  This keeps the size of the buffer cache "in check"
285	 * for big block filesystems.
286	 *
287	 * maxbufspace is calculated as around 50% of the KVA available in
288	 * the buffer_map ( DFLTBSIZE vs BKVASIZE ), I presume to reduce the
289	 * effect of fragmentation.
290	 */
291	maxbufspace = (nbuf + 8) * DFLTBSIZE;
292	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
293		hibufspace = 3 * maxbufspace / 4;
294/*
295 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
296 */
297	maxvmiobufspace = 2 * hibufspace / 3;
298/*
299 * Limit the amount of malloc memory since it is wired permanently into
300 * the kernel space.  Even though this is accounted for in the buffer
301 * allocation, we don't want the malloced region to grow uncontrolled.
302 * The malloc scheme improves memory utilization significantly on average
303 * (small) directories.
304 */
305	maxbufmallocspace = hibufspace / 20;
306
307/*
308 * Reduce the chance of a deadlock occurring by limiting the number
309 * of delayed-write dirty buffers we allow to stack up.
310 */
311	lodirtybuffers = nbuf / 16 + 10;
312	hidirtybuffers = nbuf / 8 + 20;
313	numdirtybuffers = 0;
314
315/*
316 * Try to keep the number of free buffers in the specified range,
317 * and give the syncer access to an emergency reserve.
318 */
319	lofreebuffers = nbuf / 18 + 5;
320	hifreebuffers = 2 * lofreebuffers;
321	numfreebuffers = nbuf;
322
323	kvafreespace = 0;
324
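	/*
	 * Finally, allocate the bogus page used as a placeholder during
	 * I/O on partially valid buffers (see the bogus_page comment
	 * near the top of this file).
	 */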
325	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
326	bogus_page = vm_page_alloc(kernel_object,
327			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
328			VM_ALLOC_NORMAL);
329
330}
331
332/*
333 * Free the kva allocation for a buffer
334 * Must be called only at splbio or higher,
335 *  as this is the only locking for buffer_map.
336 */
337static void
338bfreekva(struct buf * bp)
339{
340	if (bp->b_kvasize) {
341		vm_map_delete(buffer_map,
342		    (vm_offset_t) bp->b_kvabase,
343		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
344		);
345		bp->b_kvasize = 0;
346		kvaspacewakeup();
347	}
348}
349
350/*
351 *	bremfree:
352 *
353 *	Remove the buffer from the appropriate free list.
354 */
355void
356bremfree(struct buf * bp)
357{
358	int s = splbio();
359	int old_qindex = bp->b_qindex;
360
361	if (bp->b_qindex != QUEUE_NONE) {
362		if (bp->b_qindex == QUEUE_EMPTY) {
363			kvafreespace -= bp->b_kvasize;
364		}
365		if (BUF_REFCNT(bp) == 1)
366			TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
367		else if (BUF_REFCNT(bp) == 0)
368			panic("bremfree: not locked");
369		else
370			/* Temporary panic to verify exclusive locking */
371			/* This panic goes away when we allow shared refs */
372			panic("bremfree: multiple refs");
373		bp->b_qindex = QUEUE_NONE;
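		/*
		 * The buffer is now in use; account for it in the running
		 * (in-use) buffer space total.
		 */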
374		runningbufspace += bp->b_bufsize;
375	} else {
376#if !defined(MAX_PERF)
377		panic("bremfree: removing a buffer when not on a queue");
378#endif
379	}
380
381	/*
382	 * Fixup numfreebuffers count.  If the buffer is invalid or not
383	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
384	 * the buffer was free and we must decrement numfreebuffers.
385	 */
386	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
387		switch(old_qindex) {
388		case QUEUE_EMPTY:
389		case QUEUE_LRU:
390		case QUEUE_AGE:
391			--numfreebuffers;
392			break;
393		default:
394			break;
395		}
396	}
397	splx(s);
398}
399
400
401/*
402 * Get a buffer with the specified data.  Look in the cache first.  We
403 * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
404 * is set, the buffer is valid and we do not have to do anything ( see
405 * getblk() ).
406 */
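/*
 * Typical caller usage (a sketch only -- names such as "lbn" and "bsize"
 * are illustrative, and the error handling shown is the common filesystem
 * idiom rather than a requirement of this interface):
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 */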
407int
408bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
409    struct buf ** bpp)
410{
411	struct buf *bp;
412
413	bp = getblk(vp, blkno, size, 0, 0);
414	*bpp = bp;
415
416	/* if not found in cache, do some I/O */
417	if ((bp->b_flags & B_CACHE) == 0) {
418		if (curproc != NULL)
419			curproc->p_stats->p_ru.ru_inblock++;
420		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
421		bp->b_flags |= B_READ;
422		bp->b_flags &= ~(B_ERROR | B_INVAL);
423		if (bp->b_rcred == NOCRED) {
424			if (cred != NOCRED)
425				crhold(cred);
426			bp->b_rcred = cred;
427		}
428		vfs_busy_pages(bp, 0);
429		VOP_STRATEGY(vp, bp);
430		return (biowait(bp));
431	}
432	return (0);
433}
434
435/*
436 * Operates like bread, but also starts asynchronous I/O on
437 * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
438 * to initiating I/O.  If B_CACHE is set, the buffer is valid
439 * and we do not have to do anything.
440 */
441int
442breadn(struct vnode * vp, daddr_t blkno, int size,
443    daddr_t * rablkno, int *rabsize,
444    int cnt, struct ucred * cred, struct buf ** bpp)
445{
446	struct buf *bp, *rabp;
447	int i;
448	int rv = 0, readwait = 0;
449
450	*bpp = bp = getblk(vp, blkno, size, 0, 0);
451
452	/* if not found in cache, do some I/O */
453	if ((bp->b_flags & B_CACHE) == 0) {
454		if (curproc != NULL)
455			curproc->p_stats->p_ru.ru_inblock++;
456		bp->b_flags |= B_READ;
457		bp->b_flags &= ~(B_ERROR | B_INVAL);
458		if (bp->b_rcred == NOCRED) {
459			if (cred != NOCRED)
460				crhold(cred);
461			bp->b_rcred = cred;
462		}
463		vfs_busy_pages(bp, 0);
464		VOP_STRATEGY(vp, bp);
465		++readwait;
466	}
467
468	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
469		if (inmem(vp, *rablkno))
470			continue;
471		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
472
473		if ((rabp->b_flags & B_CACHE) == 0) {
474			if (curproc != NULL)
475				curproc->p_stats->p_ru.ru_inblock++;
476			rabp->b_flags |= B_READ | B_ASYNC;
477			rabp->b_flags &= ~(B_ERROR | B_INVAL);
478			if (rabp->b_rcred == NOCRED) {
479				if (cred != NOCRED)
480					crhold(cred);
481				rabp->b_rcred = cred;
482			}
483			vfs_busy_pages(rabp, 0);
484			BUF_KERNPROC(bp);
485			VOP_STRATEGY(vp, rabp);
486		} else {
487			brelse(rabp);
488		}
489	}
490
491	if (readwait) {
492		rv = biowait(bp);
493	}
494	return (rv);
495}
496
497/*
498 * Write, release buffer on completion.  (Done by iodone
499 * if async).  Do not bother writing anything if the buffer
500 * is invalid.
501 *
502 * Note that we set B_CACHE here, indicating that the buffer is
503 * fully valid and thus cacheable.  This is true even of NFS
504 * now so we set it generally.  This could be set either here
505 * or in biodone() since the I/O is synchronous.  We put it
506 * here.
507 */
508int
509bwrite(struct buf * bp)
510{
511	int oldflags, s;
512	struct vnode *vp;
513	struct mount *mp;
514
515	if (bp->b_flags & B_INVAL) {
516		brelse(bp);
517		return (0);
518	}
519
520	oldflags = bp->b_flags;
521
522#if !defined(MAX_PERF)
523	if (BUF_REFCNT(bp) == 0)
524		panic("bwrite: buffer is not busy???");
525#endif
526	s = splbio();
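	/*
	 * The buffer is being written now, so take it off the dirty
	 * accounting (bundirty() clears B_DELWRI) before starting the I/O.
	 */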
527	bundirty(bp);
528
529	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
530	bp->b_flags |= B_WRITEINPROG | B_CACHE;
531
532	bp->b_vp->v_numoutput++;
533	vfs_busy_pages(bp, 1);
534	if (curproc != NULL)
535		curproc->p_stats->p_ru.ru_oublock++;
536	splx(s);
537	BUF_KERNPROC(bp);
538	VOP_STRATEGY(bp->b_vp, bp);
539
540	/*
541	 * Collect statistics on synchronous and asynchronous writes.
542	 * Writes to block devices are charged to their associated
543	 * filesystem (if any).
544	 */
545	if ((vp = bp->b_vp) != NULL) {
546		if (vp->v_type == VBLK)
547			mp = vp->v_specmountpoint;
548		else
549			mp = vp->v_mount;
550		if (mp != NULL) {
551			if ((oldflags & B_ASYNC) == 0)
552				mp->mnt_stat.f_syncwrites++;
553			else
554				mp->mnt_stat.f_asyncwrites++;
555		}
556	}
557
558	if ((oldflags & B_ASYNC) == 0) {
559		int rtval = biowait(bp);
560		brelse(bp);
561		return (rtval);
562	}
563
564	return (0);
565}
566
567/*
568 * Delayed write. (Buffer is marked dirty).  Do not bother writing
569 * anything if the buffer is marked invalid.
570 *
571 * Note that since the buffer must be completely valid, we can safely
572 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
573 * biodone() in order to prevent getblk from writing the buffer
574 * out synchronously.
575 */
576void
577bdwrite(struct buf * bp)
578{
579	struct vnode *vp;
580
581#if !defined(MAX_PERF)
582	if (BUF_REFCNT(bp) == 0)
583		panic("bdwrite: buffer is not busy");
584#endif
585
586	if (bp->b_flags & B_INVAL) {
587		brelse(bp);
588		return;
589	}
590	bdirty(bp);
591
592	/*
593	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
594	 * true even of NFS now.
595	 */
596	bp->b_flags |= B_CACHE;
597
598	/*
599	 * This bmap keeps the system from needing to do the bmap later,
600	 * perhaps when the system is attempting to do a sync.  Since it
601	 * is likely that the indirect block -- or whatever other data
602	 * structure the filesystem needs -- is still in memory now, it is
603	 * a good time to do the bmap.  Note also that if the pageout daemon
604	 * is requesting a sync, there might not be enough memory to do
605	 * the bmap then, so this is important to do now.
606	 */
607	if (bp->b_lblkno == bp->b_blkno) {
608		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
609	}
610
611	/*
612	 * Set the *dirty* buffer range based upon the VM system dirty pages.
613	 */
614	vfs_setdirty(bp);
615
616	/*
617	 * We need to do this here to satisfy the vnode_pager and the
618	 * pageout daemon, so that it thinks that the pages have been
619	 * "cleaned".  Note that since the pages are in a delayed write
620	 * buffer -- the VFS layer "will" see that the pages get written
621	 * out on the next sync, or perhaps the cluster will be completed.
622	 */
623	vfs_clean_pages(bp);
624	bqrelse(bp);
625
626	/*
627	 * XXX The soft dependency code is not prepared to
628	 * have I/O done when a bdwrite is requested. For
629	 * now we just let the write be delayed if it is
630	 * requested by the soft dependency code.
631	 */
632	if ((vp = bp->b_vp) &&
633	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
634		  (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
635		 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
636		return;
637
638	if (numdirtybuffers >= hidirtybuffers)
639		flushdirtybuffers(0, 0);
640}
641
642/*
643 *	bdirty:
644 *
645 *	Turn buffer into delayed write request.  We must clear B_READ and
646 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to its
647 *	vnode to properly update it in the dirty/clean lists.  We mark it
648 *	B_DONE to ensure that any asynchronization of the buffer properly
649 *	clears B_DONE ( else a panic will occur later ).
650 *
651 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
652 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
653 *	should only be called if the buffer is known-good.
654 *
655 *	Since the buffer is not on a queue, we do not update the numfreebuffers
656 *	count.
657 *
658 *	Must be called at splbio().
659 *	The buffer must be on QUEUE_NONE.
660 */
661void
662bdirty(bp)
663	struct buf *bp;
664{
665	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
666	bp->b_flags &= ~(B_READ|B_RELBUF);
667
668	if ((bp->b_flags & B_DELWRI) == 0) {
669		bp->b_flags |= B_DONE | B_DELWRI;
670		reassignbuf(bp, bp->b_vp);
671		++numdirtybuffers;
672	}
673}
674
675/*
676 *	bundirty:
677 *
678 *	Clear B_DELWRI for buffer.
679 *
680 *	Since the buffer is not on a queue, we do not update the numfreebuffers
681 *	count.
682 *
683 *	Must be called at splbio().
684 *	The buffer must be on QUEUE_NONE.
685 */
686
687void
688bundirty(bp)
689	struct buf *bp;
690{
691	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
692
693	if (bp->b_flags & B_DELWRI) {
694		bp->b_flags &= ~B_DELWRI;
695		reassignbuf(bp, bp->b_vp);
696		--numdirtybuffers;
697	}
698}
699
700/*
701 *	bawrite:
702 *
703 *	Asynchronous write.  Start output on a buffer, but do not wait for
704 *	it to complete.  The buffer is released when the output completes.
705 *
706 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
707 *	B_INVAL buffers.  Not us.
708 */
709void
710bawrite(struct buf * bp)
711{
712	bp->b_flags |= B_ASYNC;
713	(void) VOP_BWRITE(bp->b_vp, bp);
714}
715
716/*
717 *	bowrite:
718 *
719 *	Ordered write.  Start output on a buffer, and flag it so that the
720 *	device will write it in the order it was queued.  The buffer is
721 *	released when the output completes.  bwrite() ( or the VOP routine
722 *	anyway ) is responsible for handling B_INVAL buffers.
723 */
724int
725bowrite(struct buf * bp)
726{
727	bp->b_flags |= B_ORDERED | B_ASYNC;
728	return (VOP_BWRITE(bp->b_vp, bp));
729}
730
731/*
732 *	brelse:
733 *
734 *	Release a busy buffer and, if requested, free its resources.  The
735 *	buffer will be stashed in the appropriate bufqueue[] allowing it
736 *	to be accessed later as a cache entity or reused for other purposes.
737 */
738void
739brelse(struct buf * bp)
740{
741	int s;
742
743	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
744
745#if 0
746	if (bp->b_flags & B_CLUSTER) {
747		relpbuf(bp, NULL);
748		return;
749	}
750#endif
751
752	s = splbio();
753
754	if (bp->b_flags & B_LOCKED)
755		bp->b_flags &= ~B_ERROR;
756
757	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
758		/*
759		 * Failed write, redirty.  Must clear B_ERROR to prevent
760		 * pages from being scrapped.  Note: B_INVAL is ignored
761		 * here but will presumably be dealt with later.
762		 */
763		bp->b_flags &= ~B_ERROR;
764		bdirty(bp);
765	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
766	    (bp->b_bufsize <= 0)) {
767		/*
768		 * Either a failed I/O or we were asked to free or not
769		 * cache the buffer.
770		 */
771		bp->b_flags |= B_INVAL;
772		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
773			(*bioops.io_deallocate)(bp);
774		if (bp->b_flags & B_DELWRI)
775			--numdirtybuffers;
776		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
777		if ((bp->b_flags & B_VMIO) == 0) {
778			if (bp->b_bufsize)
779				allocbuf(bp, 0);
780			if (bp->b_vp)
781				brelvp(bp);
782		}
783	}
784
785	/*
786	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
787	 * is called with B_DELWRI set, the underlying pages may wind up
788	 * getting freed causing a previous write (bdwrite()) to get 'lost'
789	 * because pages associated with a B_DELWRI bp are marked clean.
790	 *
791	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
792	 * if B_DELWRI is set.
793	 */
794
795	if (bp->b_flags & B_DELWRI)
796		bp->b_flags &= ~B_RELBUF;
797
798	/*
799	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
800	 * constituted, not even NFS buffers now.  Two flags affect this.  If
801	 * B_INVAL, the struct buf is invalidated but the VM object is kept
802	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
803	 *
804	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
805	 * invalidated.  B_ERROR cannot be set for a failed write unless the
806	 * buffer is also B_INVAL because it hits the re-dirtying code above.
807	 *
808	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
809	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
810	 * the commit state and we cannot afford to lose the buffer.
811	 */
812	if ((bp->b_flags & B_VMIO)
813	    && !(bp->b_vp->v_tag == VT_NFS &&
814		 bp->b_vp->v_type != VBLK &&
815		 (bp->b_flags & B_DELWRI))
816	    ) {
817
818		int i, j, resid;
819		vm_page_t m;
820		off_t foff;
821		vm_pindex_t poff;
822		vm_object_t obj;
823		struct vnode *vp;
824
825		vp = bp->b_vp;
826
827		/*
828		 * Get the base offset and length of the buffer.  Note that
829		 * for block sizes that are less than PAGE_SIZE, the b_data
830		 * base of the buffer does not represent exactly b_offset and
831		 * neither b_offset nor b_size are necessarily page aligned.
832		 * Instead, the starting position of b_offset is:
833		 *
834		 * 	b_data + (b_offset & PAGE_MASK)
835		 *
836		 * block sizes less than DEV_BSIZE (usually 512) are not
837		 * supported due to the page granularity bits (m->valid,
838		 * m->dirty, etc...).
839		 *
840		 * See man buf(9) for more information
841		 */
842
843		resid = bp->b_bufsize;
844		foff = bp->b_offset;
845
846		for (i = 0; i < bp->b_npages; i++) {
847			m = bp->b_pages[i];
848			vm_page_flag_clear(m, PG_ZERO);
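			/*
			 * If a bogus page was substituted in at I/O time,
			 * recover the real pages from the backing VM object
			 * and re-enter the buffer's mappings.
			 */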
849			if (m == bogus_page) {
850
851				obj = (vm_object_t) vp->v_object;
852				poff = OFF_TO_IDX(bp->b_offset);
853
854				for (j = i; j < bp->b_npages; j++) {
855					m = bp->b_pages[j];
856					if (m == bogus_page) {
857						m = vm_page_lookup(obj, poff + j);
858#if !defined(MAX_PERF)
859						if (!m) {
860							panic("brelse: page missing\n");
861						}
862#endif
863						bp->b_pages[j] = m;
864					}
865				}
866
867				if ((bp->b_flags & B_INVAL) == 0) {
868					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
869				}
870			}
871			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
872				int poffset = foff & PAGE_MASK;
873				int presid = resid > (PAGE_SIZE - poffset) ?
874					(PAGE_SIZE - poffset) : resid;
875
876				KASSERT(presid >= 0, ("brelse: extra page"));
877				vm_page_set_invalid(m, poffset, presid);
878			}
879			resid -= PAGE_SIZE - (foff & PAGE_MASK);
880			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
881		}
882
883		if (bp->b_flags & (B_INVAL | B_RELBUF))
884			vfs_vmio_release(bp);
885
886	} else if (bp->b_flags & B_VMIO) {
887
888		if (bp->b_flags & (B_INVAL | B_RELBUF))
889			vfs_vmio_release(bp);
890
891	}
892
893#if !defined(MAX_PERF)
894	if (bp->b_qindex != QUEUE_NONE)
895		panic("brelse: free buffer onto another queue???");
896#endif
897	if (BUF_REFCNT(bp) > 1) {
898		/* Temporary panic to verify exclusive locking */
899		/* This panic goes away when we allow shared refs */
900		panic("brelse: multiple refs");
901		/* do not release to free list */
902		BUF_UNLOCK(bp);
903		splx(s);
904		return;
905	}
906
907	/* enqueue */
908
909	/* buffers with no memory */
910	if (bp->b_bufsize == 0) {
911		bp->b_flags |= B_INVAL;
912		bp->b_qindex = QUEUE_EMPTY;
913		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
914		LIST_REMOVE(bp, b_hash);
915		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
916		bp->b_dev = NODEV;
917		kvafreespace += bp->b_kvasize;
918		if (bp->b_kvasize)
919			kvaspacewakeup();
920	/* buffers with junk contents */
921	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
922		bp->b_flags |= B_INVAL;
923		bp->b_qindex = QUEUE_AGE;
924		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
925		LIST_REMOVE(bp, b_hash);
926		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
927		bp->b_dev = NODEV;
928
929	/* buffers that are locked */
930	} else if (bp->b_flags & B_LOCKED) {
931		bp->b_qindex = QUEUE_LOCKED;
932		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
933
934	/* buffers with stale but valid contents */
935	} else if (bp->b_flags & B_AGE) {
936		bp->b_qindex = QUEUE_AGE;
937		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
938
939	/* buffers with valid and quite potentially reusable contents */
940	} else {
941		bp->b_qindex = QUEUE_LRU;
942		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
943	}
944
945	/*
946	 * If B_INVAL, clear B_DELWRI.
947	 */
948	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
949		bp->b_flags &= ~B_DELWRI;
950		--numdirtybuffers;
951	}
952
953	runningbufspace -= bp->b_bufsize;
954
955	/*
956	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
957	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
958	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
959	 * if B_INVAL is set ).
960	 */
961
962	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
963		bufcountwakeup();
964
965	/*
966	 * Something we can maybe free: wake up anyone waiting for buffer space.
967	 */
968
969	if (bp->b_bufsize)
970		bufspacewakeup();
971
972	/* unlock */
973	BUF_UNLOCK(bp);
974	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
975	splx(s);
976}
977
978/*
979 * Release a buffer back to the appropriate queue but do not try to free
980 * it.
981 *
982 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
983 * biodone() to requeue an async I/O on completion.  It is also used when
984 * known good buffers need to be requeued but we think we may need the data
985 * again soon.
986 */
987void
988bqrelse(struct buf * bp)
989{
990	int s;
991
992	s = splbio();
993
994	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
995
996#if !defined(MAX_PERF)
997	if (bp->b_qindex != QUEUE_NONE)
998		panic("bqrelse: free buffer onto another queue???");
999#endif
1000	if (BUF_REFCNT(bp) > 1) {
1001		/* do not release to free list */
1002		panic("bqrelse: multiple refs");
1003		BUF_UNLOCK(bp);
1004		splx(s);
1005		return;
1006	}
1007	if (bp->b_flags & B_LOCKED) {
1008		bp->b_flags &= ~B_ERROR;
1009		bp->b_qindex = QUEUE_LOCKED;
1010		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
1011		/* buffers with stale but valid contents */
1012	} else {
1013		bp->b_qindex = QUEUE_LRU;
1014		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1015	}
1016
1017	runningbufspace -= bp->b_bufsize;
1018
1019	if ((bp->b_flags & B_LOCKED) == 0 &&
1020	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
1021	) {
1022		bufcountwakeup();
1023	}
1024
1025	/*
1026	 * Something we can maybe free: wake up anyone waiting for buffer space.
1027	 */
1028	if (bp->b_bufsize)
1029		bufspacewakeup();
1030
1031	/* unlock */
1032	BUF_UNLOCK(bp);
1033	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1034	splx(s);
1035}
1036
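/*
 *	vfs_vmio_release:
 *
 *	Release the VM pages backing a VMIO buffer: unwire them, free any
 *	that are completely invalid and unreferenced, remove the buffer's
 *	kva mappings, and adjust the space accounting.
 */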
1037static void
1038vfs_vmio_release(bp)
1039	struct buf *bp;
1040{
1041	int i, s;
1042	vm_page_t m;
1043
1044	s = splvm();
1045	for (i = 0; i < bp->b_npages; i++) {
1046		m = bp->b_pages[i];
1047		bp->b_pages[i] = NULL;
1048		/*
1049		 * In order to keep page LRU ordering consistent, put
1050		 * everything on the inactive queue.
1051		 */
1052		vm_page_unwire(m, 0);
1053		/*
1054		 * We don't mess with busy pages, it is
1055		 * the responsibility of the process that
1056		 * busied the pages to deal with them.
1057		 */
1058		if ((m->flags & PG_BUSY) || (m->busy != 0))
1059			continue;
1060
1061		if (m->wire_count == 0) {
1062			vm_page_flag_clear(m, PG_ZERO);
1063			/*
1064			 * Might as well free the page if we can and it has
1065			 * no valid data.
1066			 */
1067			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
1068				vm_page_busy(m);
1069				vm_page_protect(m, VM_PROT_NONE);
1070				vm_page_free(m);
1071			}
1072		}
1073	}
1074	bufspace -= bp->b_bufsize;
1075	vmiospace -= bp->b_bufsize;
1076	runningbufspace -= bp->b_bufsize;
1077	splx(s);
1078	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
1079	if (bp->b_bufsize)
1080		bufspacewakeup();
1081	bp->b_npages = 0;
1082	bp->b_bufsize = 0;
1083	bp->b_flags &= ~B_VMIO;
1084	if (bp->b_vp)
1085		brelvp(bp);
1086}
1087
1088/*
1089 * Check to see if a block is currently memory resident.
1090 */
1091struct buf *
1092gbincore(struct vnode * vp, daddr_t blkno)
1093{
1094	struct buf *bp;
1095	struct bufhashhdr *bh;
1096
1097	bh = BUFHASH(vp, blkno);
1098	bp = bh->lh_first;
1099
1100	/* Search hash chain */
1101	while (bp != NULL) {
1102		/* hit */
1103		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
1104		    (bp->b_flags & B_INVAL) == 0) {
1105			break;
1106		}
1107		bp = bp->b_hash.le_next;
1108	}
1109	return (bp);
1110}
1111
1112/*
1113 * this routine implements clustered async writes for
1114 * clearing out B_DELWRI buffers...  This is much better
1115 * than the old way of writing only one buffer at a time.
1116 */
1117int
1118vfs_bio_awrite(struct buf * bp)
1119{
1120	int i;
1121	daddr_t lblkno = bp->b_lblkno;
1122	struct vnode *vp = bp->b_vp;
1123	int s;
1124	int ncl;
1125	struct buf *bpa;
1126	int nwritten;
1127	int size;
1128	int maxcl;
1129
1130	s = splbio();
1131	/*
1132	 * right now we support clustered writing only to regular files, and
1133	 * then only if our I/O system is not saturated.
1134	 */
1135	if ((vp->v_type == VREG) &&
1136	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1137	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1138
1139		size = vp->v_mount->mnt_stat.f_iosize;
1140		maxcl = MAXPHYS / size;
1141
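		/*
		 * Scan forward for logically and physically contiguous
		 * delayed-write buffers of the same size that are eligible
		 * for clustering (B_CLUSTEROK set, not busy, not B_INVAL).
		 */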
1142		for (i = 1; i < maxcl; i++) {
1143			if ((bpa = gbincore(vp, lblkno + i)) &&
1144			    BUF_REFCNT(bpa) == 0 &&
1145			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1146			    (B_DELWRI | B_CLUSTEROK)) &&
1147			    (bpa->b_bufsize == size)) {
1148				if ((bpa->b_blkno == bpa->b_lblkno) ||
1149				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
1150					break;
1151			} else {
1152				break;
1153			}
1154		}
1155		ncl = i;
1156		/*
1157		 * this is a possible cluster write
1158		 */
1159		if (ncl != 1) {
1160			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
1161			splx(s);
1162			return nwritten;
1163		}
1164	}
1165
1166	BUF_LOCK(bp, LK_EXCLUSIVE);
1167	bremfree(bp);
1168	bp->b_flags |= B_ASYNC;
1169
1170	splx(s);
1171	/*
1172	 * default (old) behavior, writing out only one block
1173	 *
1174	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1175	 */
1176	nwritten = bp->b_bufsize;
1177	(void) VOP_BWRITE(bp->b_vp, bp);
1178
1179	return nwritten;
1180}
1181
1182/*
1183 *	getnewbuf:
1184 *
1185 *	Find and initialize a new buffer header, freeing up existing buffers
1186 *	in the bufqueues as necessary.  The new buffer is returned locked.
1187 *
1188 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1189 *	buffer away, the caller must set B_INVAL prior to calling brelse().
1190 *
1191 *	We block if:
1192 *		We have insufficient buffer headers
1193 *		We have insufficient buffer space
1194 *		buffer_map is too fragmented ( space reservation fails )
1195 *
1196 *	We do *not* attempt to flush dirty buffers more than one level deep.
1197 *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1198 *
1199 *	If P_FLSINPROG is set, we are allowed to dip into our emergency
1200 *	reserve.
1201 */
1202static struct buf *
1203getnewbuf(struct vnode *vp, daddr_t blkno,
1204	int slpflag, int slptimeo, int size, int maxsize)
1205{
1206	struct buf *bp;
1207	struct buf *nbp;
1208	struct buf *dbp;
1209	int outofspace;
1210	int nqindex;
1211	int defrag = 0;
1212	static int newbufcnt = 0;
1213	int lastnewbuf = newbufcnt;
1214
1215restart:
1216	/*
1217	 * Calculate whether we are out of buffer space.  This state is
1218	 * recalculated on every restart.  If we are out of space, we
1219	 * have to turn off defragmentation.  The outofspace code will
1220	 * defragment too, but the looping conditionals will be messed up
1221	 * if both outofspace and defrag are on.
1222	 */
1223
1224	dbp = NULL;
1225	outofspace = 0;
1226	if (bufspace >= hibufspace) {
1227		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
1228		    bufspace >= maxbufspace
1229		) {
1230			outofspace = 1;
1231			defrag = 0;
1232		}
1233	}
1234
1235	/*
1236	 * defrag state is semi-persistent.  1 means we are flagged for
1237	 * defragging.  -1 means we actually defragged something.
1238	 */
1239	/* nop */
1240
1241	/*
1242	 * Setup for scan.  If we do not have enough free buffers,
1243	 * we setup a degenerate case that falls through the while.
1244	 *
1245	 * If we are in the middle of a flush, we can dip into the
1246	 * emergency reserve.
1247	 *
1248	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
1249	 * because those buffers are, well, empty.
1250	 */
1251
1252	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
1253	    numfreebuffers < lofreebuffers) {
1254		nqindex = QUEUE_LRU;
1255		nbp = NULL;
1256	} else {
1257		nqindex = QUEUE_EMPTY;
1258		if (outofspace ||
1259		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL) {
1260			nqindex = QUEUE_AGE;
1261			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1262			if (nbp == NULL) {
1263				nqindex = QUEUE_LRU;
1264				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1265			}
1266		}
1267	}
1268
1269	/*
1270	 * Run scan, possibly freeing data and/or kva mappings on the fly
1271	 * depending on the queue being scanned.
1272	 */
1273
1274	while ((bp = nbp) != NULL) {
1275		int qindex = nqindex;
1276		/*
1277		 * Calculate next bp ( we can only use it if we do not block
1278		 * or do other fancy things ).
1279		 */
1280		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1281			switch(qindex) {
1282			case QUEUE_EMPTY:
1283				nqindex = QUEUE_AGE;
1284				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
1285					break;
1286				/* fall through */
1287			case QUEUE_AGE:
1288				nqindex = QUEUE_LRU;
1289				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
1290					break;
1291				/* fall through */
1292			case QUEUE_LRU:
1293				/*
1294				 * nbp is NULL.
1295				 */
1296				break;
1297			}
1298		}
1299
1300		/*
1301		 * Sanity Checks
1302		 */
1303		KASSERT(BUF_REFCNT(bp) == 0, ("getnewbuf: busy buffer %p on free list", bp));
1304		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1305
1306		/*
1307		 * Here we try to move NON VMIO buffers to the end of the
1308		 * LRU queue in order to make VMIO buffers more readily
1309		 * freeable.  We also try to move buffers with a positive
1310		 * usecount to the end.
1311		 *
1312		 * Note that by moving the bp to the end, we setup a following
1313		 * loop.  Since we continue to decrement b_usecount this
1314		 * is ok and, in fact, desirable.
1315		 *
1316		 * If we are at the end of the list, we move ourself to the
1317		 * same place and need to fixup nbp and nqindex to handle
1318		 * the following case.
1319		 */
1320
1321		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
1322			if ((bp->b_flags & B_VMIO) == 0 ||
1323			    (vmiospace < maxvmiobufspace)
1324			) {
1325				--bp->b_usecount;
1326				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1327				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1328				if (nbp == NULL) {
1329					nqindex = qindex;
1330					nbp = bp;
1331				}
1332				continue;
1333			}
1334		}
1335
1336		/*
1337		 * If we come across a delayed write and numdirtybuffers should
1338		 * be flushed, try to write it out.  Only if P_FLSINPROG is
1339		 * not set.  We can't afford to recursively stack more than
1340		 * one deep due to the possibility of having deep VFS call
1341		 * stacks.
1342		 *
1343		 * Limit the number of dirty buffers we are willing to try
1344		 * to recover since it really isn't our job here.
1345		 */
1346		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1347			/*
1348			 * This is rather complex, but necessary.  If we come
1349			 * across a B_DELWRI buffer we have to flush it in
1350			 * order to use it.  We only do this if we absolutely
1351			 * need to.  We must also protect against too much
1352			 * recursion which might run us out of stack due to
1353			 * deep VFS call stacks.
1354			 *
1355			 * In heavy-writing situations, QUEUE_LRU can contain
1356			 * a large number of DELWRI buffers at its head.  These
1357			 * buffers must be moved to the tail if they cannot be
1358			 * written async in order to reduce the scanning time
1359			 * required to skip past these buffers in later
1360			 * getnewbuf() calls.
1361			 */
1362			if ((curproc->p_flag & P_FLSINPROG) ||
1363			    numdirtybuffers < hidirtybuffers) {
1364				if (qindex == QUEUE_LRU) {
1365					/*
1366					 * dbp prevents us from looping forever
1367					 * if all bps in QUEUE_LRU are dirty.
1368					 */
1369					if (bp == dbp) {
1370						bp = NULL;
1371						break;
1372					}
1373					if (dbp == NULL)
1374						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
1375					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1376					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1377				}
1378				continue;
1379			}
1380			curproc->p_flag |= P_FLSINPROG;
1381			vfs_bio_awrite(bp);
1382			curproc->p_flag &= ~P_FLSINPROG;
1383			goto restart;
1384		}
1385
1386		if (defrag > 0 && bp->b_kvasize == 0)
1387			continue;
1388		if (outofspace > 0 && bp->b_bufsize == 0)
1389			continue;
1390
1391		/*
1392		 * Start freeing the bp.  This is somewhat involved.  nbp
1393		 * remains valid only for QUEUE_EMPTY bp's.
1394		 */
1395
1396		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
1397			panic("getnewbuf: locked buf");
1398		bremfree(bp);
1399
1400		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
1401			if (bp->b_flags & B_VMIO) {
1402				bp->b_flags &= ~B_ASYNC;
1403				vfs_vmio_release(bp);
1404			}
1405			if (bp->b_vp)
1406				brelvp(bp);
1407		}
1408
1409		/*
1410		 * NOTE:  nbp is now entirely invalid.  We can only restart
1411		 * the scan from this point on.
1412		 *
1413		 * Get the rest of the buffer freed up.  b_kva* is still
1414		 * valid after this operation.
1415		 */
1416
1417		if (bp->b_rcred != NOCRED) {
1418			crfree(bp->b_rcred);
1419			bp->b_rcred = NOCRED;
1420		}
1421		if (bp->b_wcred != NOCRED) {
1422			crfree(bp->b_wcred);
1423			bp->b_wcred = NOCRED;
1424		}
1425		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1426			(*bioops.io_deallocate)(bp);
1427
1428		LIST_REMOVE(bp, b_hash);
1429		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1430
1431		if (bp->b_bufsize)
1432			allocbuf(bp, 0);
1433
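		/*
		 * Reset the buffer header to a pristine state.  Note that
		 * the kva mapping (b_kvabase/b_kvasize) is preserved.
		 */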
1434		bp->b_flags = 0;
1435		bp->b_dev = NODEV;
1436		bp->b_vp = NULL;
1437		bp->b_blkno = bp->b_lblkno = 0;
1438		bp->b_offset = NOOFFSET;
1439		bp->b_iodone = 0;
1440		bp->b_error = 0;
1441		bp->b_resid = 0;
1442		bp->b_bcount = 0;
1443		bp->b_npages = 0;
1444		bp->b_dirtyoff = bp->b_dirtyend = 0;
1445		bp->b_usecount = 5;
1446
1447		LIST_INIT(&bp->b_dep);
1448
1449		/*
1450		 * Ok, now that we have a free buffer, if we are defragging
1451		 * we have to recover the kvaspace.
1452		 */
1453
1454		if (defrag > 0) {
1455			defrag = -1;
1456			bp->b_flags |= B_INVAL;
1457			bfreekva(bp);
1458			brelse(bp);
1459			goto restart;
1460		}
1461
1462		if (outofspace > 0) {
1463			outofspace = -1;
1464			bp->b_flags |= B_INVAL;
1465			bfreekva(bp);
1466			brelse(bp);
1467			goto restart;
1468		}
1469
1470		/*
1471		 * We are done
1472		 */
1473		break;
1474	}
1475
1476	/*
1477	 * If we exhausted our list, sleep as appropriate.
1478	 */
1479
1480	if (bp == NULL) {
1481		int flags;
1482
1483dosleep:
1484		if (defrag > 0)
1485			flags = VFS_BIO_NEED_KVASPACE;
1486		else if (outofspace > 0)
1487			flags = VFS_BIO_NEED_BUFSPACE;
1488		else
1489			flags = VFS_BIO_NEED_ANY;
1490
1491		(void) speedup_syncer();
1492		needsbuffer |= flags;
1493		while (needsbuffer & flags) {
1494			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1495			    "newbuf", slptimeo))
1496				return (NULL);
1497		}
1498	} else {
1499		/*
1500		 * We finally have a valid bp.  We aren't quite out of the
1501		 * woods yet; we still have to reserve kva space.
1502		 */
1503		vm_offset_t addr = 0;
1504
1505		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1506
1507		if (maxsize != bp->b_kvasize) {
1508			bfreekva(bp);
1509
1510			if (vm_map_findspace(buffer_map,
1511				vm_map_min(buffer_map), maxsize, &addr)
1512			) {
1513				/*
1514				 * Uh oh.  Buffer map is too fragmented.  Try
1515				 * to defragment.
1516				 */
1517				if (defrag <= 0) {
1518					defrag = 1;
1519					bp->b_flags |= B_INVAL;
1520					brelse(bp);
1521					goto restart;
1522				}
1523				/*
1524				 * Uh oh.  We couldn't seem to defragment
1525				 */
1526				bp = NULL;
1527				goto dosleep;
1528			}
1529		}
1530		if (addr) {
1531			vm_map_insert(buffer_map, NULL, 0,
1532				addr, addr + maxsize,
1533				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1534
1535			bp->b_kvabase = (caddr_t) addr;
1536			bp->b_kvasize = maxsize;
1537		}
1538		bp->b_data = bp->b_kvabase;
1539	}
1540
1541	/*
1542	 * If we have slept at some point in this process and another
1543	 * process has managed to allocate a new buffer while we slept,
1544	 * we have to return NULL so that our caller can recheck to
1545	 * ensure that the other process did not create a buffer with the
1546	 * same identity as the one we were requesting.  We make this
1547	 * check by incrementing the static int newbufcnt each time we
1548	 * successfully allocate a new buffer. By saving the value of
1549	 * newbufcnt in our local lastnewbuf, we can compare newbufcnt
1550	 * with lastnewbuf to see if any other process managed to
1551	 * allocate a buffer while we were doing so ourselves.
1552	 *
1553	 * Note that bp, if valid, is locked.
1554	 */
1555	if (lastnewbuf == newbufcnt) {
1556		/*
1557		 * No buffers allocated, so we can return one if we were
1558		 * successful, or continue trying if we were not successful.
1559		 */
1560		if (bp != NULL) {
1561			newbufcnt += 1;
1562			return (bp);
1563		}
1564		goto restart;
1565	}
1566	/*
1567	 * Another process allocated a buffer since we were called, so
1568	 * we have to free the one we allocated and return NULL to let
1569	 * our caller recheck to see if a new buffer is still needed.
1570	 */
1571	if (bp != NULL) {
1572		bp->b_flags |= B_INVAL;
1573		brelse(bp);
1574	}
1575	return (NULL);
1576}
1577
1578/*
1579 *	waitfreebuffers:
1580 *
1581 *	Wait for sufficient free buffers.  This routine is not called if
1582 *	curproc is the update process so we do not have to do anything
1583 *	fancy.
1584 */
1585
1586static void
1587waitfreebuffers(int slpflag, int slptimeo)
1588{
1589	while (numfreebuffers < hifreebuffers) {
1590		flushdirtybuffers(slpflag, slptimeo);
1591		if (numfreebuffers >= hifreebuffers)
1592			break;
1593		needsbuffer |= VFS_BIO_NEED_FREE;
1594		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1595			break;
1596	}
1597}
1598
1599/*
1600 *	flushdirtybuffers:
1601 *
1602 *	This routine is called when we get too many dirty buffers.
1603 *
1604 *	We have to protect ourselves from recursion, but we also do not want
1605 *	another process's flushdirtybuffers() to interfere with the syncer if
1606 *	it decides to flush dirty buffers.
1607 *
1608 *	In order to maximize operations, we allow any process to flush
1609 *	dirty buffers and use P_FLSINPROG to prevent recursion.
1610 */
1611
1612static void
1613flushdirtybuffers(int slpflag, int slptimeo)
1614{
1615	int s;
1616
1617	s = splbio();
1618
1619	if (curproc->p_flag & P_FLSINPROG) {
1620		splx(s);
1621		return;
1622	}
1623	curproc->p_flag |= P_FLSINPROG;
1624
1625	while (numdirtybuffers > lodirtybuffers) {
1626		if (flushbufqueues() == 0)
1627			break;
1628	}
1629
1630	curproc->p_flag &= ~P_FLSINPROG;
1631
1632	splx(s);
1633}
1634
1635static int
1636flushbufqueues(void)
1637{
1638	struct buf *bp;
1639	int qindex;
1640	int r = 0;
1641
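	/*
	 * Scan QUEUE_AGE first and then QUEUE_LRU for a delayed-write
	 * buffer; release it if it is also B_INVAL, otherwise start an
	 * async write on it.  Return non-zero if a buffer was processed.
	 */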
1642	qindex = QUEUE_AGE;
1643	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1644
1645	for (;;) {
1646		if (bp == NULL) {
1647			if (qindex == QUEUE_LRU)
1648				break;
1649			qindex = QUEUE_LRU;
1650			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1651				break;
1652		}
1653
1654		/*
1655		 * Try to free up B_INVAL delayed-write buffers rather than
1656		 * writing them out.  Note also that NFS is somewhat sensitive
1657		 * to B_INVAL buffers so it is doubly important that we do
1658		 * this.
1659		 */
1660		if ((bp->b_flags & B_DELWRI) != 0) {
1661			if (bp->b_flags & B_INVAL) {
1662				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
1663					panic("flushbufqueues: locked buf");
1664				bremfree(bp);
1665				brelse(bp);
1666			} else {
1667				vfs_bio_awrite(bp);
1668			}
1669			++r;
1670			break;
1671		}
1672		bp = TAILQ_NEXT(bp, b_freelist);
1673	}
1674	return(r);
1675}
1676
1677/*
1678 * Check to see if a block is currently memory resident.
1679 */
1680struct buf *
1681incore(struct vnode * vp, daddr_t blkno)
1682{
1683	struct buf *bp;
1684
1685	int s = splbio();
1686	bp = gbincore(vp, blkno);
1687	splx(s);
1688	return (bp);
1689}
1690
1691/*
1692 * Returns true if no I/O is needed to access the
1693 * associated VM object.  This is like incore except
1694 * it also hunts around in the VM system for the data.
1695 */
1696
1697int
1698inmem(struct vnode * vp, daddr_t blkno)
1699{
1700	vm_object_t obj;
1701	vm_offset_t toff, tinc, size;
1702	vm_page_t m;
1703	vm_ooffset_t off;
1704
1705	if (incore(vp, blkno))
1706		return 1;
1707	if (vp->v_mount == NULL)
1708		return 0;
1709	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1710		return 0;
1711
1712	obj = vp->v_object;
1713	size = PAGE_SIZE;
1714	if (size > vp->v_mount->mnt_stat.f_iosize)
1715		size = vp->v_mount->mnt_stat.f_iosize;
1716	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
1717
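	/*
	 * Walk the block in page-sized (or smaller, for small f_iosize)
	 * chunks and verify that every byte is valid in the VM object;
	 * a missing or partially valid page means I/O would be needed.
	 */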
1718	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1719		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1720		if (!m)
1721			return 0;
1722		tinc = size;
1723		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
1724			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
1725		if (vm_page_is_valid(m,
1726		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1727			return 0;
1728	}
1729	return 1;
1730}
1731
1732/*
1733 *	vfs_setdirty:
1734 *
1735 *	Sets the dirty range for a buffer based on the status of the dirty
1736 *	bits in the pages comprising the buffer.
1737 *
1738 *	The range is limited to the size of the buffer.
1739 *
1740 *	This routine is primarily used by NFS, but is generalized for the
1741 *	B_VMIO case.
1742 */
1743static void
1744vfs_setdirty(struct buf *bp)
1745{
1746	int i;
1747	vm_object_t object;
1748
1749	/*
1750	 * Degenerate case - empty buffer
1751	 */
1752
1753	if (bp->b_bufsize == 0)
1754		return;
1755
1756	/*
1757	 * We qualify the scan for modified pages on whether the
1758	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1759	 * is not cleared simply by protecting pages off.
1760	 */
1761
1762	if ((bp->b_flags & B_VMIO) == 0)
1763		return;
1764
1765	object = bp->b_pages[0]->object;
1766
1767	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1768		printf("Warning: object %p writeable but not mightbedirty\n", object);
1769	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1770		printf("Warning: object %p mightbedirty but not writeable\n", object);
1771
1772	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1773		vm_offset_t boffset;
1774		vm_offset_t eoffset;
1775
1776		/*
1777		 * test the pages to see if they have been modified directly
1778		 * by users through the VM system.
1779		 */
1780		for (i = 0; i < bp->b_npages; i++) {
1781			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1782			vm_page_test_dirty(bp->b_pages[i]);
1783		}
1784
1785		/*
1786		 * Calculate the encompassing dirty range, boffset and eoffset,
1787		 * (eoffset - boffset) bytes.
1788		 */
1789
1790		for (i = 0; i < bp->b_npages; i++) {
1791			if (bp->b_pages[i]->dirty)
1792				break;
1793		}
1794		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1795
1796		for (i = bp->b_npages - 1; i >= 0; --i) {
1797			if (bp->b_pages[i]->dirty) {
1798				break;
1799			}
1800		}
1801		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1802
1803		/*
1804		 * Fit it to the buffer.
1805		 */
1806
1807		if (eoffset > bp->b_bcount)
1808			eoffset = bp->b_bcount;
1809
1810		/*
1811		 * If we have a good dirty range, merge with the existing
1812		 * dirty range.
1813		 */
1814
1815		if (boffset < eoffset) {
1816			if (bp->b_dirtyoff > boffset)
1817				bp->b_dirtyoff = boffset;
1818			if (bp->b_dirtyend < eoffset)
1819				bp->b_dirtyend = eoffset;
1820		}
1821	}
1822}
1823
1824/*
1825 *	getblk:
1826 *
1827 *	Get a block given a specified block and offset into a file/device.
1828 *	The buffers B_DONE bit will be cleared on return, making it almost
1829 *	The buffer's B_DONE bit will be cleared on return, making it almost
1830 *	return.  The caller should clear B_INVAL prior to initiating a
1831 *	READ.
1832 *
1833 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
1834 *	an existing buffer.
1835 *
1836 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
1837 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
1838 *	and then cleared based on the backing VM.  If the previous buffer is
1839 *	non-0-sized but invalid, B_CACHE will be cleared.
1840 *
1841 *	If getblk() must create a new buffer, the new buffer is returned with
1842 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
1843 *	case it is returned with B_INVAL clear and B_CACHE set based on the
1844 *	backing VM.
1845 *
1846 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
1847 *	B_CACHE bit is clear.
1848 *
1849 *	What this means, basically, is that the caller should use B_CACHE to
1850 *	determine whether the buffer is fully valid or not and should clear
1851 *	B_INVAL prior to issuing a read.  If the caller intends to validate
1852 *	the buffer by loading its data area with something, the caller needs
1853 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
1854 *	the caller should set B_CACHE ( as an optimization ), else the caller
1855 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
1856 *	a write attempt or if it was a successful read.  If the caller
1857 *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
1858 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
1859 */
1860struct buf *
1861getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1862{
1863	struct buf *bp;
1864	int s;
1865	struct bufhashhdr *bh;
1866
1867#if !defined(MAX_PERF)
1868	if (size > MAXBSIZE)
1869		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1870#endif
1871
1872	s = splbio();
1873loop:
1874	/*
1875	 * Block if we are low on buffers.  The syncer is allowed more
1876	 * buffers in order to avoid a deadlock.
1877	 */
1878	if (curproc == updateproc && numfreebuffers == 0) {
1879		needsbuffer |= VFS_BIO_NEED_ANY;
1880		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1881		    slptimeo);
1882	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1883		waitfreebuffers(slpflag, slptimeo);
1884	}
1885
1886	if ((bp = gbincore(vp, blkno))) {
1887		/*
1888		 * Buffer is in-core
1889		 */
1890
1891		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1892			if (bp->b_usecount < BUF_MAXUSE)
1893				++bp->b_usecount;
1894			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
1895			    "getblk", slpflag, slptimeo) == ENOLCK)
1896				goto loop;
1897			splx(s);
1898			return (struct buf *) NULL;
1899		}
1900
1901		/*
1902		 * The buffer is locked.  B_CACHE is cleared if the buffer is
1903		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
1904		 * and for a VMIO buffer B_CACHE is adjusted according to the
1905		 * backing VM cache.
1906		 */
1907		if (bp->b_flags & B_INVAL)
1908			bp->b_flags &= ~B_CACHE;
1909		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
1910			bp->b_flags |= B_CACHE;
1911		bremfree(bp);
1912
1913		/*
1914		 * check for size inconsistencies for the non-VMIO case.
1915		 */
1916
1917		if (bp->b_bcount != size) {
1918			if ((bp->b_flags & B_VMIO) == 0 ||
1919			    (size > bp->b_kvasize)
1920			) {
1921				if (bp->b_flags & B_DELWRI) {
1922					bp->b_flags |= B_NOCACHE;
1923					VOP_BWRITE(bp->b_vp, bp);
1924				} else {
1925					if ((bp->b_flags & B_VMIO) &&
1926					   (LIST_FIRST(&bp->b_dep) == NULL)) {
1927						bp->b_flags |= B_RELBUF;
1928						brelse(bp);
1929					} else {
1930						bp->b_flags |= B_NOCACHE;
1931						VOP_BWRITE(bp->b_vp, bp);
1932					}
1933				}
1934				goto loop;
1935			}
1936		}
1937
1938		/*
1939		 * If the size is inconsistent in the VMIO case, we can resize
1940		 * the buffer.  This might lead to B_CACHE getting set or
1941		 * cleared.  If the size has not changed, B_CACHE remains
1942		 * unchanged from its previous state.
1943		 */
1944
1945		if (bp->b_bcount != size)
1946			allocbuf(bp, size);
1947
1948		KASSERT(bp->b_offset != NOOFFSET,
1949		    ("getblk: no buffer offset"));
1950
1951		/*
1952		 * A buffer with B_DELWRI set and B_CACHE clear must
1953		 * be committed before we can return the buffer in
1954		 * order to prevent the caller from issuing a read
1955		 * ( due to B_CACHE not being set ) and overwriting
1956		 * it.
1957		 *
1958		 * Most callers, including NFS and FFS, need this to
1959		 * operate properly either because they assume they
1960		 * can issue a read if B_CACHE is not set, or because
1961		 * ( for example ) an uncached B_DELWRI might loop due
1962		 * to softupdates re-dirtying the buffer.  In the latter
1963		 * case, B_CACHE is set after the first write completes,
1964		 * preventing further loops.
1965		 */
1966
1967		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1968			VOP_BWRITE(bp->b_vp, bp);
1969			goto loop;
1970		}
1971
1972		if (bp->b_usecount < BUF_MAXUSE)
1973			++bp->b_usecount;
1974		splx(s);
1975		bp->b_flags &= ~B_DONE;
1976	} else {
1977		/*
1978		 * Buffer is not in-core, create new buffer.  The buffer
1979		 * returned by getnewbuf() is locked.  Note that the returned
1980		 * buffer is also considered valid (not marked B_INVAL).
1981		 */
1982		int bsize, maxsize, vmio;
1983		off_t offset;
1984
1985		if (vp->v_type == VBLK)
1986			bsize = DEV_BSIZE;
1987		else if (vp->v_mountedhere)
1988			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1989		else if (vp->v_mount)
1990			bsize = vp->v_mount->mnt_stat.f_iosize;
1991		else
1992			bsize = size;
1993
1994		offset = (off_t)blkno * bsize;
1995		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1996		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1997		maxsize = imax(maxsize, bsize);
1998
1999		if ((bp = getnewbuf(vp, blkno,
2000			slpflag, slptimeo, size, maxsize)) == NULL) {
2001			if (slpflag || slptimeo) {
2002				splx(s);
2003				return NULL;
2004			}
2005			goto loop;
2006		}
2007
2008		/*
2009		 * This code is used to make sure that a buffer is not
2010		 * created while the getnewbuf routine is blocked.
2011		 * This can be a problem whether the vnode is locked or not.
2012		 * If the buffer is created out from under us, we have to
2013		 * throw away the one we just created.  There is no window for a
2014		 * race because we are safely running at splbio() from the
2015		 * point of the duplicate buffer creation through to here.
2016		 */
2017		if (gbincore(vp, blkno)) {
2018			bp->b_flags |= B_INVAL;
2019			brelse(bp);
2020			goto loop;
2021		}
2022
2023		/*
2024		 * Insert the buffer into the hash, so that it can
2025		 * be found by incore.
2026		 */
2027		bp->b_blkno = bp->b_lblkno = blkno;
2028		bp->b_offset = offset;
2029
2030		bgetvp(vp, bp);
2031		LIST_REMOVE(bp, b_hash);
2032		bh = BUFHASH(vp, blkno);
2033		LIST_INSERT_HEAD(bh, bp, b_hash);
2034
2035		/*
2036		 * Set the B_VMIO bit and use allocbuf() to grow the buffer.  Since the
2037		 * buffer size starts out as 0, B_CACHE will be set by
2038		 * allocbuf() for the VMIO case prior to it testing the
2039		 * backing store for validity.
2040		 */
2041
2042		if (vmio) {
2043			bp->b_flags |= B_VMIO;
2044#if defined(VFS_BIO_DEBUG)
2045			if (vp->v_type != VREG && vp->v_type != VBLK)
2046				printf("getblk: vmioing file type %d???\n", vp->v_type);
2047#endif
2048		} else {
2049			bp->b_flags &= ~B_VMIO;
2050		}
2051
2052		allocbuf(bp, size);
2053
2054		splx(s);
2055		bp->b_flags &= ~B_DONE;
2056	}
2057	return (bp);
2058}
2059
2060/*
2061 * Get an empty, disassociated buffer of given size.  The buffer is initially
2062 * set to B_INVAL.
2063 */
2064struct buf *
2065geteblk(int size)
2066{
2067	struct buf *bp;
2068	int s;
2069
2070	s = splbio();
2071	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
2072	splx(s);
2073	allocbuf(bp, size);
2074	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2075	return (bp);
2076}
2077
2078
2079/*
2080 * This code obtains the buffer memory from either anonymous system
2081 * memory (in the case of non-VMIO operations) or from an associated
2082 * VM object (in the case of VMIO operations).  This code is able to
2083 * resize a buffer up or down.
2084 *
2085 * Note that this code is tricky, and has many complications to resolve
2086 * deadlock or inconsistent data situations.  Tread lightly!!!
2087 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2088 * the caller.  Calling this code willy-nilly can result in the loss of data.
2089 *
2090 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2091 * B_CACHE for the non-VMIO case.
2092 */
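/*
 * Worked sizing example (illustrative only, assuming DEV_BSIZE == 512 and
 * PAGE_SIZE == 4096): a request for size 3000 is first rounded up to a
 * DEV_BSIZE multiple,
 *
 *	newbsize = (3000 + 511) & ~511 = 3072
 *
 * and for the VMIO case the page count also depends on where the buffer
 * begins within its first page; with (b_offset & PAGE_MASK) == 2048,
 *
 *	desiredpages = num_pages(2048 + 3072) = num_pages(5120) = 2
 *
 * so the buffer spans the tail of one page and the head of the next.
 */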
2093
2094int
2095allocbuf(struct buf *bp, int size)
2096{
2097	int newbsize, mbsize;
2098	int i;
2099
2100#if !defined(MAX_PERF)
2101	if (BUF_REFCNT(bp) == 0)
2102		panic("allocbuf: buffer not busy");
2103
2104	if (bp->b_kvasize < size)
2105		panic("allocbuf: buffer too small");
2106#endif
2107
2108	if ((bp->b_flags & B_VMIO) == 0) {
2109		caddr_t origbuf;
2110		int origbufsize;
2111		/*
2112		 * Just get anonymous memory from the kernel.  Don't
2113		 * mess with B_CACHE.
2114		 */
2115		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2116#if !defined(NO_B_MALLOC)
2117		if (bp->b_flags & B_MALLOC)
2118			newbsize = mbsize;
2119		else
2120#endif
2121			newbsize = round_page(size);
2122
2123		if (newbsize < bp->b_bufsize) {
2124#if !defined(NO_B_MALLOC)
2125			/*
2126			 * malloced buffers are not shrunk
2127			 */
2128			if (bp->b_flags & B_MALLOC) {
2129				if (newbsize) {
2130					bp->b_bcount = size;
2131				} else {
2132					free(bp->b_data, M_BIOBUF);
2133					bufspace -= bp->b_bufsize;
2134					bufmallocspace -= bp->b_bufsize;
2135					runningbufspace -= bp->b_bufsize;
2136					if (bp->b_bufsize)
2137						bufspacewakeup();
2138					bp->b_data = bp->b_kvabase;
2139					bp->b_bufsize = 0;
2140					bp->b_bcount = 0;
2141					bp->b_flags &= ~B_MALLOC;
2142				}
2143				return 1;
2144			}
2145#endif
2146			vm_hold_free_pages(
2147			    bp,
2148			    (vm_offset_t) bp->b_data + newbsize,
2149			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2150		} else if (newbsize > bp->b_bufsize) {
2151#if !defined(NO_B_MALLOC)
2152			/*
2153			 * We only use malloced memory on the first allocation,
2154			 * and revert to page-allocated memory when the buffer grows.
2155			 */
2156			if ( (bufmallocspace < maxbufmallocspace) &&
2157				(bp->b_bufsize == 0) &&
2158				(mbsize <= PAGE_SIZE/2)) {
2159
2160				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2161				bp->b_bufsize = mbsize;
2162				bp->b_bcount = size;
2163				bp->b_flags |= B_MALLOC;
2164				bufspace += mbsize;
2165				bufmallocspace += mbsize;
2166				runningbufspace += bp->b_bufsize;
2167				return 1;
2168			}
2169#endif
2170			origbuf = NULL;
2171			origbufsize = 0;
2172#if !defined(NO_B_MALLOC)
2173			/*
2174			 * If the buffer is growing on its other-than-first allocation,
2175			 * then we revert to the page-allocation scheme.
2176			 */
2177			if (bp->b_flags & B_MALLOC) {
2178				origbuf = bp->b_data;
2179				origbufsize = bp->b_bufsize;
2180				bp->b_data = bp->b_kvabase;
2181				bufspace -= bp->b_bufsize;
2182				bufmallocspace -= bp->b_bufsize;
2183				runningbufspace -= bp->b_bufsize;
2184				if (bp->b_bufsize)
2185					bufspacewakeup();
2186				bp->b_bufsize = 0;
2187				bp->b_flags &= ~B_MALLOC;
2188				newbsize = round_page(newbsize);
2189			}
2190#endif
2191			vm_hold_load_pages(
2192			    bp,
2193			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2194			    (vm_offset_t) bp->b_data + newbsize);
2195#if !defined(NO_B_MALLOC)
2196			if (origbuf) {
2197				bcopy(origbuf, bp->b_data, origbufsize);
2198				free(origbuf, M_BIOBUF);
2199			}
2200#endif
2201		}
2202	} else {
2203		vm_page_t m;
2204		int desiredpages;
2205
2206		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2207		desiredpages = (size == 0) ? 0 :
2208			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2209
2210#if !defined(NO_B_MALLOC)
2211		if (bp->b_flags & B_MALLOC)
2212			panic("allocbuf: VMIO buffer can't be malloced");
2213#endif
2214		/*
2215		 * Set B_CACHE initially if buffer is 0 length or will become
2216		 * 0-length.
2217		 */
2218		if (size == 0 || bp->b_bufsize == 0)
2219			bp->b_flags |= B_CACHE;
2220
2221		if (newbsize < bp->b_bufsize) {
2222			/*
2223			 * DEV_BSIZE aligned new buffer size is less than the
2224			 * DEV_BSIZE aligned existing buffer size.  Figure out
2225			 * if we have to remove any pages.
2226			 */
2227			if (desiredpages < bp->b_npages) {
2228				for (i = desiredpages; i < bp->b_npages; i++) {
2229					/*
2230					 * the page is not freed here -- it
2231					 * is the responsibility of
2232					 * vnode_pager_setsize
2233					 */
2234					m = bp->b_pages[i];
2235					KASSERT(m != bogus_page,
2236					    ("allocbuf: bogus page found"));
2237					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2238						;
2239
2240					bp->b_pages[i] = NULL;
2241					vm_page_unwire(m, 0);
2242				}
2243				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2244				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2245				bp->b_npages = desiredpages;
2246			}
2247		} else if (size > bp->b_bcount) {
2248			/*
2249			 * We are growing the buffer, possibly in a
2250			 * byte-granular fashion.
2251			 */
2252			struct vnode *vp;
2253			vm_object_t obj;
2254			vm_offset_t toff;
2255			vm_offset_t tinc;
2256
2257			/*
2258			 * Step 1, bring in the VM pages from the object,
2259			 * allocating them if necessary.  We must clear
2260			 * B_CACHE if these pages are not valid for the
2261			 * range covered by the buffer.
2262			 */
2263
2264			vp = bp->b_vp;
2265			obj = vp->v_object;
2266
2267			while (bp->b_npages < desiredpages) {
2268				vm_page_t m;
2269				vm_pindex_t pi;
2270
2271				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2272				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2273					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2274					if (m == NULL) {
2275						VM_WAIT;
2276						vm_pageout_deficit += desiredpages - bp->b_npages;
2277					} else {
2278						vm_page_wire(m);
2279						vm_page_wakeup(m);
2280						bp->b_flags &= ~B_CACHE;
2281						bp->b_pages[bp->b_npages] = m;
2282						++bp->b_npages;
2283					}
2284					continue;
2285				}
2286
2287				/*
2288				 * We found a page.  If we have to sleep on it,
2289				 * retry because it might have gotten freed out
2290				 * from under us.
2291				 *
2292				 * We can only test PG_BUSY here.  Blocking on
2293				 * m->busy might lead to a deadlock:
2294				 *
2295				 *  vm_fault->getpages->cluster_read->allocbuf
2296				 *
2297				 */
2298
2299				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2300					continue;
2301
2302				/*
2303				 * We have a good page.  Should we wakeup the
2304				 * page daemon?
2305				 */
2306				if ((curproc != pageproc) &&
2307				    ((m->queue - m->pc) == PQ_CACHE) &&
2308				    ((cnt.v_free_count + cnt.v_cache_count) <
2309					(cnt.v_free_min + cnt.v_cache_min))
2310				) {
2311					pagedaemon_wakeup();
2312				}
2313				vm_page_flag_clear(m, PG_ZERO);
2314				vm_page_wire(m);
2315				bp->b_pages[bp->b_npages] = m;
2316				++bp->b_npages;
2317			}
2318
2319			/*
2320			 * Step 2.  We've loaded the pages into the buffer,
2321			 * we have to figure out if we can still have B_CACHE
2322			 * set.  Note that B_CACHE is set according to the
2323			 * byte-granular range ( bcount and size ), new the
2324			 * byte-granular range ( bcount and size ), not the
2325			 *
2326			 * The VM test is against m->valid, which is DEV_BSIZE
2327			 * aligned.  Needless to say, the validity of the data
2328			 * needs to also be DEV_BSIZE aligned.  Note that this
2329			 * fails with NFS if the server or some other client
2330			 * extends the file's EOF.  If our buffer is resized,
2331			 * B_CACHE may remain set! XXX
2332			 */
2333
2334			toff = bp->b_bcount;
2335			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2336
2337			while ((bp->b_flags & B_CACHE) && toff < size) {
2338				vm_pindex_t pi;
2339
2340				if (tinc > (size - toff))
2341					tinc = size - toff;
2342
2343				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2344				    PAGE_SHIFT;
2345
2346				vfs_buf_test_cache(
2347				    bp,
2348				    bp->b_offset,
2349				    toff,
2350				    tinc,
2351				    bp->b_pages[pi]
2352				);
2353				toff += tinc;
2354				tinc = PAGE_SIZE;
2355			}
2356
2357			/*
2358			 * Step 3, fixup the KVM pmap.  Remember that
2359			 * bp->b_data is relative to bp->b_offset, but
2360			 * bp->b_offset may be offset into the first page.
2361			 */
2362
2363			bp->b_data = (caddr_t)
2364			    trunc_page((vm_offset_t)bp->b_data);
2365			pmap_qenter(
2366			    (vm_offset_t)bp->b_data,
2367			    bp->b_pages,
2368			    bp->b_npages
2369			);
2370			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2371			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2372		}
2373	}
2374	if (bp->b_flags & B_VMIO)
2375		vmiospace += (newbsize - bp->b_bufsize);
2376	bufspace += (newbsize - bp->b_bufsize);
2377	runningbufspace += (newbsize - bp->b_bufsize);
2378	if (newbsize < bp->b_bufsize)
2379		bufspacewakeup();
2380	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2381	bp->b_bcount = size;		/* requested buffer size	*/
2382	return 1;
2383}
2384
2385/*
2386 *	biowait:
2387 *
2388 *	Wait for buffer I/O completion, returning error status.  The buffer
2389 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
2390 *	error and cleared.
2391 */
2392int
2393biowait(register struct buf * bp)
2394{
2395	int s;
2396
2397	s = splbio();
2398	while ((bp->b_flags & B_DONE) == 0)
2399#if defined(NO_SCHEDULE_MODS)
2400		tsleep(bp, PRIBIO, "biowait", 0);
2401#else
2402		if (bp->b_flags & B_READ)
2403			tsleep(bp, PRIBIO, "biord", 0);
2404		else
2405			tsleep(bp, PRIBIO, "biowr", 0);
2406#endif
2407	splx(s);
2408	if (bp->b_flags & B_EINTR) {
2409		bp->b_flags &= ~B_EINTR;
2410		return (EINTR);
2411	}
2412	if (bp->b_flags & B_ERROR) {
2413		return (bp->b_error ? bp->b_error : EIO);
2414	} else {
2415		return (0);
2416	}
2417}
2418
2419/*
2420 *	biodone:
2421 *
2422 *	Finish I/O on a buffer, optionally calling a completion function.
2423 *	This is usually called from an interrupt so process blocking is
2424 *	not allowed.
2425 *
2426 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2427 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2428 *	assuming B_INVAL is clear.
2429 *
2430 *	For the VMIO case, we set B_CACHE if the op was a read and no
2431 *	read error occurred, or if the op was a write.  B_CACHE is never
2432 *	set if the buffer is invalid or otherwise uncacheable.
2433 *
2434 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2435 *	initiator to leave B_INVAL set to brelse the buffer out of existence
2436 *	in the biodone routine.
2437 */
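/*
 * Illustrative sketch, not compiled: how an I/O initiator arranges an
 * asynchronous completion callback.  B_CALL and b_iodone are the real
 * hooks tested below; my_iodone() is a hypothetical handler.
 *
 *	bp->b_flags |= B_CALL;		typically together with B_ASYNC
 *	bp->b_iodone = my_iodone;
 *	VOP_STRATEGY(vp, bp);
 *
 * When B_CALL is set, biodone() clears it, invokes b_iodone and returns
 * immediately, so the handler is responsible for any remaining completion
 * work (including the VMIO page processing below) and for releasing the
 * buffer.
 */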
2438void
2439biodone(register struct buf * bp)
2440{
2441	int s;
2442
2443	s = splbio();
2444
2445	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy", bp));
2446	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2447
2448	bp->b_flags |= B_DONE;
2449
2450	if (bp->b_flags & B_FREEBUF) {
2451		brelse(bp);
2452		splx(s);
2453		return;
2454	}
2455
2456	if ((bp->b_flags & B_READ) == 0) {
2457		vwakeup(bp);
2458	}
2459
2460	/* call optional completion function if requested */
2461	if (bp->b_flags & B_CALL) {
2462		bp->b_flags &= ~B_CALL;
2463		(*bp->b_iodone) (bp);
2464		splx(s);
2465		return;
2466	}
2467	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2468		(*bioops.io_complete)(bp);
2469
2470	if (bp->b_flags & B_VMIO) {
2471		int i, resid;
2472		vm_ooffset_t foff;
2473		vm_page_t m;
2474		vm_object_t obj;
2475		int iosize;
2476		struct vnode *vp = bp->b_vp;
2477
2478		obj = vp->v_object;
2479
2480#if defined(VFS_BIO_DEBUG)
2481		if (vp->v_usecount == 0) {
2482			panic("biodone: zero vnode ref count");
2483		}
2484
2485		if (vp->v_object == NULL) {
2486			panic("biodone: missing VM object");
2487		}
2488
2489		if ((vp->v_flag & VOBJBUF) == 0) {
2490			panic("biodone: vnode is not setup for merged cache");
2491		}
2492#endif
2493
2494		foff = bp->b_offset;
2495		KASSERT(bp->b_offset != NOOFFSET,
2496		    ("biodone: no buffer offset"));
2497
2498#if !defined(MAX_PERF)
2499		if (!obj) {
2500			panic("biodone: no object");
2501		}
2502#endif
2503#if defined(VFS_BIO_DEBUG)
2504		if (obj->paging_in_progress < bp->b_npages) {
2505			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2506			    obj->paging_in_progress, bp->b_npages);
2507		}
2508#endif
2509
2510		/*
2511		 * Set B_CACHE if the op was a normal read and no error
2512		 * occurred.  B_CACHE is set for writes in the b*write()
2513		 * routines.
2514		 */
2515		iosize = bp->b_bcount;
2516		if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2517			bp->b_flags |= B_CACHE;
2518		}
2519
2520		for (i = 0; i < bp->b_npages; i++) {
2521			int bogusflag = 0;
2522			m = bp->b_pages[i];
2523			if (m == bogus_page) {
2524				bogusflag = 1;
2525				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2526				if (!m) {
2527#if defined(VFS_BIO_DEBUG)
2528					printf("biodone: page disappeared\n");
2529#endif
2530					vm_object_pip_subtract(obj, 1);
2531					bp->b_flags &= ~B_CACHE;
2532					continue;
2533				}
2534				bp->b_pages[i] = m;
2535				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2536			}
2537#if defined(VFS_BIO_DEBUG)
2538			if (OFF_TO_IDX(foff) != m->pindex) {
2539				printf(
2540"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2541				    (unsigned long)foff, m->pindex);
2542			}
2543#endif
2544			resid = IDX_TO_OFF(m->pindex + 1) - foff;
2545			if (resid > iosize)
2546				resid = iosize;
2547
2548			/*
2549			 * In the write case, the valid and clean bits are
2550			 * already changed correctly ( see bdwrite() ), so we
2551			 * only need to do this here in the read case.
2552			 */
2553			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2554				vfs_page_set_valid(bp, foff, i, m);
2555			}
2556			vm_page_flag_clear(m, PG_ZERO);
2557
2558			/*
2559			 * When debugging new filesystems or buffer I/O methods, this
2560			 * is the most common error that pops up.  If you see this, you
2561			 * have not set the page busy flag correctly!!!
2562			 */
2563			if (m->busy == 0) {
2564#if !defined(MAX_PERF)
2565				printf("biodone: page busy < 0, "
2566				    "pindex: %d, foff: 0x(%x,%x), "
2567				    "resid: %d, index: %d\n",
2568				    (int) m->pindex, (int)(foff >> 32),
2569						(int) foff & 0xffffffff, resid, i);
2570#endif
2571				if (vp->v_type != VBLK)
2572#if !defined(MAX_PERF)
2573					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2574					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2575					    (int) bp->b_lblkno,
2576					    bp->b_flags, bp->b_npages);
2577				else
2578					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2579					    (int) bp->b_lblkno,
2580					    bp->b_flags, bp->b_npages);
2581				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2582				    m->valid, m->dirty, m->wire_count);
2583#endif
2584				panic("biodone: page busy < 0\n");
2585			}
2586			vm_page_io_finish(m);
2587			vm_object_pip_subtract(obj, 1);
2588			foff += resid;
2589			iosize -= resid;
2590		}
2591		if (obj)
2592			vm_object_pip_wakeupn(obj, 0);
2593	}
2594	/*
2595	 * For asynchronous completions, release the buffer now. The brelse
2596	 * will do a wakeup there if necessary - so no need to do a wakeup
2597	 * here in the async case. The sync case always needs to do a wakeup.
2598	 */
2599
2600	if (bp->b_flags & B_ASYNC) {
2601		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2602			brelse(bp);
2603		else
2604			bqrelse(bp);
2605	} else {
2606		wakeup(bp);
2607	}
2608	splx(s);
2609}
2610
2611#if 0	/* not with kirks code */
2612static int vfs_update_interval = 30;
2613
2614static void
2615vfs_update()
2616{
2617	while (1) {
2618		tsleep(&vfs_update_wakeup, PUSER, "update",
2619		    hz * vfs_update_interval);
2620		vfs_update_wakeup = 0;
2621		sync(curproc, NULL);
2622	}
2623}
2624
2625static int
2626sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
2627{
2628	int error = sysctl_handle_int(oidp,
2629		oidp->oid_arg1, oidp->oid_arg2, req);
2630	if (!error)
2631		wakeup(&vfs_update_wakeup);
2632	return error;
2633}
2634
2635SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
2636	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
2637
2638#endif
2639
2640
2641/*
2642 * This routine is called in lieu of iodone in the case of
2643 * incomplete I/O.  This keeps the busy status for pages
2644 * consistent.
2645 */
2646void
2647vfs_unbusy_pages(struct buf * bp)
2648{
2649	int i;
2650
2651	if (bp->b_flags & B_VMIO) {
2652		struct vnode *vp = bp->b_vp;
2653		vm_object_t obj = vp->v_object;
2654
2655		for (i = 0; i < bp->b_npages; i++) {
2656			vm_page_t m = bp->b_pages[i];
2657
2658			if (m == bogus_page) {
2659				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2660#if !defined(MAX_PERF)
2661				if (!m) {
2662					panic("vfs_unbusy_pages: page missing\n");
2663				}
2664#endif
2665				bp->b_pages[i] = m;
2666				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2667			}
2668			vm_object_pip_subtract(obj, 1);
2669			vm_page_flag_clear(m, PG_ZERO);
2670			vm_page_io_finish(m);
2671		}
2672		vm_object_pip_wakeupn(obj, 0);
2673	}
2674}
2675
2676/*
2677 * vfs_page_set_valid:
2678 *
2679 *	Set the valid bits in a page based on the supplied offset.   The
2680 *	range is restricted to the buffer's size.
2681 *
2682 *	This routine is typically called after a read completes.
2683 */
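/*
 * Worked example (illustrative only, PAGE_SIZE == 4096): for a buffer
 * with b_offset == 4096 and b_bcount == 6000, file data ends at
 * 4096 + 6000 == 10096, so the call for the page at offset 8192 computes
 *
 *	soff = 8192, eoff = min(12288, 10096) = 10096
 *
 * and marks bytes [0, 1904) of that page valid and clean, leaving the
 * remainder of the page beyond EOF untouched.
 */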
2684static void
2685vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2686{
2687	vm_ooffset_t soff, eoff;
2688
2689	/*
2690	 * Start and end offsets in buffer.  eoff - soff may not cross a
2691	 * page boundary or cross the end of the buffer.  The end of the
2692	 * buffer, in this case, is our file EOF, not the allocation size
2693	 * of the buffer.
2694	 */
2695	soff = off;
2696	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2697	if (eoff > bp->b_offset + bp->b_bcount)
2698		eoff = bp->b_offset + bp->b_bcount;
2699
2700	/*
2701	 * Set valid range.  This is typically the entire buffer and thus the
2702	 * entire page.
2703	 */
2704	if (eoff > soff) {
2705		vm_page_set_validclean(
2706		    m,
2707		   (vm_offset_t) (soff & PAGE_MASK),
2708		   (vm_offset_t) (eoff - soff)
2709		);
2710	}
2711}
2712
2713/*
2714 * This routine is called before a device strategy routine.
2715 * It is used to tell the VM system that paging I/O is in
2716 * progress, and treat the pages associated with the buffer
2717 * almost as being PG_BUSY.  Also the object paging_in_progress
2718 * flag is handled to make sure that the object doesn't become
2719 * inconsistent.
2720 *
2721 * Since I/O has not been initiated yet, certain buffer flags
2722 * such as B_ERROR or B_INVAL may be in an inconsistent state
2723 * and should be ignored.
2724 */
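/*
 * Illustrative usage sketch, not compiled: read setup passes
 * clear_modify == 0, which may substitute bogus_page for already-valid
 * pages, while write setup (bwrite()-style paths) passes
 * clear_modify == 1 so the pages are instead marked valid and clean via
 * vfs_page_set_valid().
 *
 *	bp->b_flags |= B_READ;
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(vp, bp);
 */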
2725void
2726vfs_busy_pages(struct buf * bp, int clear_modify)
2727{
2728	int i, bogus;
2729
2730	if (bp->b_flags & B_VMIO) {
2731		struct vnode *vp = bp->b_vp;
2732		vm_object_t obj = vp->v_object;
2733		vm_ooffset_t foff;
2734
2735		foff = bp->b_offset;
2736		KASSERT(bp->b_offset != NOOFFSET,
2737		    ("vfs_busy_pages: no buffer offset"));
2738		vfs_setdirty(bp);
2739
2740retry:
2741		for (i = 0; i < bp->b_npages; i++) {
2742			vm_page_t m = bp->b_pages[i];
2743			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2744				goto retry;
2745		}
2746
2747		bogus = 0;
2748		for (i = 0; i < bp->b_npages; i++) {
2749			vm_page_t m = bp->b_pages[i];
2750
2751			vm_page_flag_clear(m, PG_ZERO);
2752			if ((bp->b_flags & B_CLUSTER) == 0) {
2753				vm_object_pip_add(obj, 1);
2754				vm_page_io_start(m);
2755			}
2756
2757			/*
2758			 * When readying a buffer for a read ( i.e
2759			 * clear_modify == 0 ), it is important to do
2760			 * bogus_page replacement for valid pages in
2761			 * partially instantiated buffers.  Partially
2762			 * instantiated buffers can, in turn, occur when
2763			 * reconstituting a buffer from its VM backing store
2764			 * base.  We only have to do this if B_CACHE is
2765			 * clear ( which causes the I/O to occur in the
2766			 * first place ).  The replacement prevents the read
2767			 * I/O from overwriting potentially dirty VM-backed
2768			 * pages.  XXX bogus page replacement is, uh, bogus.
2769			 * It may not work properly with small-block devices.
2770			 * We need to find a better way.
2771			 */
2772
2773			vm_page_protect(m, VM_PROT_NONE);
2774			if (clear_modify)
2775				vfs_page_set_valid(bp, foff, i, m);
2776			else if (m->valid == VM_PAGE_BITS_ALL &&
2777				(bp->b_flags & B_CACHE) == 0) {
2778				bp->b_pages[i] = bogus_page;
2779				bogus++;
2780			}
2781			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2782		}
2783		if (bogus)
2784			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2785	}
2786}
2787
2788/*
2789 * Tell the VM system that the pages associated with this buffer
2790 * are clean.  This is used for delayed writes where the data is
2791 * going to go to disk eventually without additional VM intervention.
2792 *
2793 * Note that while we only really need to clean through to b_bcount, we
2794 * just go ahead and clean through to b_bufsize.
2795 */
2796static void
2797vfs_clean_pages(struct buf * bp)
2798{
2799	int i;
2800
2801	if (bp->b_flags & B_VMIO) {
2802		vm_ooffset_t foff;
2803
2804		foff = bp->b_offset;
2805		KASSERT(bp->b_offset != NOOFFSET,
2806		    ("vfs_clean_pages: no buffer offset"));
2807		for (i = 0; i < bp->b_npages; i++) {
2808			vm_page_t m = bp->b_pages[i];
2809			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2810			vm_ooffset_t eoff = noff;
2811
2812			if (eoff > bp->b_offset + bp->b_bufsize)
2813				eoff = bp->b_offset + bp->b_bufsize;
2814			vfs_page_set_valid(bp, foff, i, m);
2815			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2816			foff = noff;
2817		}
2818	}
2819}
2820
2821/*
2822 *	vfs_bio_set_validclean:
2823 *
2824 *	Set the range within the buffer to valid and clean.  The range is
2825 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
2826 *	itself may be offset from the beginning of the first page.
2827 */
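/*
 * Worked example (illustrative only, PAGE_SIZE == 4096): with
 * (b_offset & PAGE_MASK) == 512, base == 1024 and size == 8192, base is
 * first adjusted to 1536 and the loop below issues
 *
 *	vm_page_set_validclean(b_pages[0], 1536, 2560)
 *	vm_page_set_validclean(b_pages[1],    0, 4096)
 *	vm_page_set_validclean(b_pages[2],    0, 1536)
 *
 * covering exactly the 8192 requested bytes across three pages.
 */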
2828
2829void
2830vfs_bio_set_validclean(struct buf *bp, int base, int size)
2831{
2832	if (bp->b_flags & B_VMIO) {
2833		int i;
2834		int n;
2835
2836		/*
2837		 * Fixup base to be relative to beginning of first page.
2838		 * Set initial n to be the maximum number of bytes in the
2839		 * first page that can be validated.
2840		 */
2841
2842		base += (bp->b_offset & PAGE_MASK);
2843		n = PAGE_SIZE - (base & PAGE_MASK);
2844
2845		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2846			vm_page_t m = bp->b_pages[i];
2847
2848			if (n > size)
2849				n = size;
2850
2851			vm_page_set_validclean(m, base & PAGE_MASK, n);
2852			base += n;
2853			size -= n;
2854			n = PAGE_SIZE;
2855		}
2856	}
2857}
2858
2859/*
2860 *	vfs_bio_clrbuf:
2861 *
2862 *	clear a buffer.  This routine essentially fakes an I/O, so we need
2863 *	to clear B_ERROR and B_INVAL.
2864 *
2865 *	Note that while we only theoretically need to clear through b_bcount,
2866 *	we go ahead and clear through b_bufsize.
2867 */
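/*
 * Worked example of the single-page fast path below (illustrative only,
 * assuming DEV_BSIZE == 512 and PAGE_SIZE == 4096): for a page-aligned
 * buffer with b_bufsize == 2048,
 *
 *	mask = (1 << (2048 / 512)) - 1 = 0x0f
 *
 * i.e. the first four DEV_BSIZE chunks of the page.  The data is bzero'd
 * only if the page is neither pre-zeroed nor already fully valid for
 * those chunks, after which the four valid bits are set.
 */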
2868
2869void
2870vfs_bio_clrbuf(struct buf *bp) {
2871	int i, mask = 0;
2872	caddr_t sa, ea;
2873	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2874		bp->b_flags &= ~(B_INVAL|B_ERROR);
2875		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2876		    (bp->b_offset & PAGE_MASK) == 0) {
2877			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2878			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2879			    ((bp->b_pages[0]->valid & mask) != mask)) {
2880				bzero(bp->b_data, bp->b_bufsize);
2881			}
2882			bp->b_pages[0]->valid |= mask;
2883			bp->b_resid = 0;
2884			return;
2885		}
2886		ea = sa = bp->b_data;
2887		for(i=0;i<bp->b_npages;i++,sa=ea) {
2888			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
2889			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2890			ea = (caddr_t)ulmin((u_long)ea,
2891				(u_long)bp->b_data + bp->b_bufsize);
2892			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2893			if ((bp->b_pages[i]->valid & mask) == mask)
2894				continue;
2895			if ((bp->b_pages[i]->valid & mask) == 0) {
2896				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2897					bzero(sa, ea - sa);
2898				}
2899			} else {
2900				for (; sa < ea; sa += DEV_BSIZE, j++) {
2901					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2902						(bp->b_pages[i]->valid & (1<<j)) == 0)
2903						bzero(sa, DEV_BSIZE);
2904				}
2905			}
2906			bp->b_pages[i]->valid |= mask;
2907			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2908		}
2909		bp->b_resid = 0;
2910	} else {
2911		clrbuf(bp);
2912	}
2913}
2914
2915/*
2916 * vm_hold_load_pages and vm_hold_free_pages move pages into and out of
2917 * a buffer's address space.  The pages are anonymous and are
2918 * not associated with a file object.
2919 */
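/*
 * Worked example (illustrative only, PAGE_SIZE == 4096, hypothetical kva):
 * if b_data sits at 0xc1000000 and allocbuf() grows a non-VMIO buffer
 * from 8192 to 16384 bytes, vm_hold_load_pages() is called with
 * from == 0xc1002000 and to == 0xc1004000.  index therefore starts at 2,
 * two fresh wired pages are allocated, entered into the kva at
 * 0xc1002000 and 0xc1003000, and recorded in b_pages[2] and b_pages[3],
 * after which b_npages becomes 4.
 */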
2920void
2921vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2922{
2923	vm_offset_t pg;
2924	vm_page_t p;
2925	int index;
2926
2927	to = round_page(to);
2928	from = round_page(from);
2929	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2930
2931	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2932
2933tryagain:
2934
2935		p = vm_page_alloc(kernel_object,
2936			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2937		    VM_ALLOC_NORMAL);
2938		if (!p) {
2939			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
2940			VM_WAIT;
2941			goto tryagain;
2942		}
2943		vm_page_wire(p);
2944		p->valid = VM_PAGE_BITS_ALL;
2945		vm_page_flag_clear(p, PG_ZERO);
2946		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2947		bp->b_pages[index] = p;
2948		vm_page_wakeup(p);
2949	}
2950	bp->b_npages = index;
2951}
2952
2953void
2954vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2955{
2956	vm_offset_t pg;
2957	vm_page_t p;
2958	int index, newnpages;
2959
2960	from = round_page(from);
2961	to = round_page(to);
2962	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2963
2964	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2965		p = bp->b_pages[index];
2966		if (p && (index < bp->b_npages)) {
2967#if !defined(MAX_PERF)
2968			if (p->busy) {
2969				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2970					bp->b_blkno, bp->b_lblkno);
2971			}
2972#endif
2973			bp->b_pages[index] = NULL;
2974			pmap_kremove(pg);
2975			vm_page_busy(p);
2976			vm_page_unwire(p, 0);
2977			vm_page_free(p);
2978		}
2979	}
2980	bp->b_npages = newnpages;
2981}
2982
2983
2984#include "opt_ddb.h"
2985#ifdef DDB
2986#include <ddb/ddb.h>
2987
2988DB_SHOW_COMMAND(buffer, db_show_buffer)
2989{
2990	/* get args */
2991	struct buf *bp = (struct buf *)addr;
2992
2993	if (!have_addr) {
2994		db_printf("usage: show buffer <addr>\n");
2995		return;
2996	}
2997
2998	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
2999	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
3000		  "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
3001		  "b_blkno = %d, b_pblkno = %d\n",
3002		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3003		  major(bp->b_dev), minor(bp->b_dev),
3004		  bp->b_data, bp->b_blkno, bp->b_pblkno);
3005	if (bp->b_npages) {
3006		int i;
3007		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3008		for (i = 0; i < bp->b_npages; i++) {
3009			vm_page_t m;
3010			m = bp->b_pages[i];
3011			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3012			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3013			if ((i + 1) < bp->b_npages)
3014				db_printf(",");
3015		}
3016		db_printf("\n");
3017	}
3018}
3019#endif /* DDB */
3020