vfs_bio.c revision 46349
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 *		John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.207 1999/04/29 18:15:25 alc Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme.  Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *
23 * Author:  John S. Dyson
24 * Significant help during the development and debugging phases
25 * had been provided by David Greenman, also of the FreeBSD core team.
26 *
27 * see man buf(9) for more info.
28 */
29
30#define VMIO
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/sysproto.h>
34#include <sys/kernel.h>
35#include <sys/sysctl.h>
36#include <sys/proc.h>
37#include <sys/vnode.h>
38#include <sys/vmmeter.h>
39#include <sys/lock.h>
40#include <miscfs/specfs/specdev.h>
41#include <vm/vm.h>
42#include <vm/vm_param.h>
43#include <vm/vm_prot.h>
44#include <vm/vm_kern.h>
45#include <vm/vm_pageout.h>
46#include <vm/vm_page.h>
47#include <vm/vm_object.h>
48#include <vm/vm_extern.h>
49#include <vm/vm_map.h>
50#include <sys/buf.h>
51#include <sys/mount.h>
52#include <sys/malloc.h>
53#include <sys/resourcevar.h>
54
55static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
56
57struct	bio_ops bioops;		/* I/O operation notification */
58
59#if 0 	/* replaced by sched_sync */
60static void vfs_update __P((void));
61static struct	proc *updateproc;
62static struct kproc_desc up_kp = {
63	"update",
64	vfs_update,
65	&updateproc
66};
67SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
68#endif
69
70struct buf *buf;		/* buffer header pool */
71struct swqueue bswlist;
72
73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74		vm_offset_t to);
75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76		vm_offset_t to);
77static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
78			       int pageno, vm_page_t m);
79static void vfs_clean_pages(struct buf * bp);
80static void vfs_setdirty(struct buf *bp);
81static void vfs_vmio_release(struct buf *bp);
82static void flushdirtybuffers(int slpflag, int slptimeo);
83static int flushbufqueues(void);
84
85/*
86 * Internal update daemon, process 3
87 *	The variable vfs_update_wakeup allows for internal syncs.
88 */
89int vfs_update_wakeup;
90
91/*
92 * bogus page -- for I/O to/from partially complete buffers
93 * this is a temporary solution to the problem, but it is not
94 * really that bad.  it would be better to split the buffer
95 * for input in the case of buffers partially already in memory,
96 * but the code is intricate enough already.
97 */
98vm_page_t bogus_page;
99int runningbufspace;
100static vm_offset_t bogus_offset;
101
102static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
103	bufmallocspace, maxbufmallocspace, hibufspace;
104static int needsbuffer;
105static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
106static int numfreebuffers, lofreebuffers, hifreebuffers;
107static int kvafreespace;
108
109SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
110	&numdirtybuffers, 0, "");
111SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
112	&lodirtybuffers, 0, "");
113SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
114	&hidirtybuffers, 0, "");
115SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
116	&numfreebuffers, 0, "");
117SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
118	&lofreebuffers, 0, "");
119SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
120	&hifreebuffers, 0, "");
121SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
122	&runningbufspace, 0, "");
123SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
124	&maxbufspace, 0, "");
125SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
126	&hibufspace, 0, "");
127SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
128	&bufspace, 0, "");
129SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
130	&maxvmiobufspace, 0, "");
131SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
132	&vmiospace, 0, "");
133SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
134	&maxbufmallocspace, 0, "");
135SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
136	&bufmallocspace, 0, "");
137SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
138	&kvafreespace, 0, "");
139
140static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
141struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
142
143extern int vm_swap_size;
144
145#define BUF_MAXUSE		24
146
147#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
148#define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
149#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
150#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
151#define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */
152
153/*
154 *	kvaspacewakeup:
155 *
156 *	Called when kva space is potentially available for recovery or when
157 *	kva space is recovered in the buffer_map.  This function wakes up
158 *	anyone waiting for buffer_map kva space.  Even though the buffer_map
159 *	is larger than maxbufspace, this situation will typically occur
160 *	when the buffer_map gets fragmented.
161 */
162
163static __inline void
164kvaspacewakeup(void)
165{
166	/*
167	 * If someone is waiting for KVA space, wake them up.  Even
168	 * though we haven't freed the kva space yet, the waiting
169	 * process will be able to now.
170	 */
171	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
172		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
173		wakeup(&needsbuffer);
174	}
175}
176
177/*
178 *	bufspacewakeup:
179 *
180 *	Called when buffer space is potentially available for recovery or when
181 *	buffer space is recovered.  getnewbuf() will block on this flag when
182 *	it is unable to free sufficient buffer space.  Buffer space becomes
183 *	recoverable when bp's get placed back in the queues.
184 */
185
186static __inline void
187bufspacewakeup(void)
188{
189	/*
190	 * If someone is waiting for BUF space, wake them up.  Even
191	 * though we haven't freed the buffer space yet, the waiting
192	 * process will be able to now.
193	 */
194	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
195		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
196		wakeup(&needsbuffer);
197	}
198}
199
200/*
201 *	bufcountwakeup:
202 *
203 *	Called when a buffer has been added to one of the free queues to
204 *	account for the buffer and to wakeup anyone waiting for free buffers.
205 *	This typically occurs when large amounts of metadata are being handled
206 *	by the buffer cache ( else buffer space runs out first, usually ).
207 */
208
209static __inline void
210bufcountwakeup(void)
211{
212	++numfreebuffers;
213	if (needsbuffer) {
214		needsbuffer &= ~VFS_BIO_NEED_ANY;
215		if (numfreebuffers >= hifreebuffers)
216			needsbuffer &= ~VFS_BIO_NEED_FREE;
217		wakeup(&needsbuffer);
218	}
219}
220
221/*
222 *	vfs_buf_test_cache:
223 *
224 *	Called when a buffer is extended.  This function clears the B_CACHE
225 *	bit if the newly extended portion of the buffer does not contain
226 *	valid data.
227 */
228static __inline__
229void
230vfs_buf_test_cache(struct buf *bp,
231		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
232		  vm_page_t m)
233{
234	if (bp->b_flags & B_CACHE) {
235		int base = (foff + off) & PAGE_MASK;
236		if (vm_page_is_valid(m, base, size) == 0)
237			bp->b_flags &= ~B_CACHE;
238	}
239}
240
241
242/*
243 * Initialize buffer headers and related structures.
244 */
245void
246bufinit()
247{
248	struct buf *bp;
249	int i;
250
251	TAILQ_INIT(&bswlist);
252	LIST_INIT(&invalhash);
253
254	/* first, make a null hash table */
255	for (i = 0; i < BUFHSZ; i++)
256		LIST_INIT(&bufhashtbl[i]);
257
258	/* next, make a null set of free lists */
259	for (i = 0; i < BUFFER_QUEUES; i++)
260		TAILQ_INIT(&bufqueues[i]);
261
262	/* finally, initialize each buffer header and stick on empty q */
263	for (i = 0; i < nbuf; i++) {
264		bp = &buf[i];
265		bzero(bp, sizeof *bp);
266		bp->b_flags = B_INVAL;	/* we're just an empty header */
267		bp->b_dev = NODEV;
268		bp->b_rcred = NOCRED;
269		bp->b_wcred = NOCRED;
270		bp->b_qindex = QUEUE_EMPTY;
271		bp->b_xflags = 0;
272		LIST_INIT(&bp->b_dep);
273		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
274		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
275	}
276
277	/*
278	 * maxbufspace is currently calculated to support all filesystem
279	 * blocks to be 8K.  If you happen to use a 16K filesystem, the size
280	 * of the buffer cache is still the same as it would be for 8K
281	 * filesystems.  This keeps the size of the buffer cache "in check"
282	 * for big block filesystems.
283	 *
284	 * maxbufspace is calculated as around 50% of the KVA available in
285	 * the buffer_map ( DFLTBSIZE vs BKVASIZE ), I presume to reduce the
286	 * effect of fragmentation.
287	 */
288	maxbufspace = (nbuf + 8) * DFLTBSIZE;
289	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
290		hibufspace = 3 * maxbufspace / 4;
291/*
292 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
293 */
294	maxvmiobufspace = 2 * hibufspace / 3;
295/*
296 * Limit the amount of malloc memory since it is wired permanently into
297 * the kernel space.  Even though this is accounted for in the buffer
298 * allocation, we don't want the malloced region to grow uncontrolled.
299 * The malloc scheme improves memory utilization significantly on average
300 * (small) directories.
301 */
302	maxbufmallocspace = hibufspace / 20;
303
304/*
305 * Reduce the chance of a deadlock occurring by limiting the number
306 * of delayed-write dirty buffers we allow to stack up.
307 */
308	lodirtybuffers = nbuf / 16 + 10;
309	hidirtybuffers = nbuf / 8 + 20;
310	numdirtybuffers = 0;
311
312/*
313 * Try to keep the number of free buffers in the specified range,
314 * and give the syncer access to an emergency reserve.
315 */
316	lofreebuffers = nbuf / 18 + 5;
317	hifreebuffers = 2 * lofreebuffers;
318	numfreebuffers = nbuf;
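/*
 * Worked example (illustrative only): with nbuf = 1024 the formulas
 * above give lodirtybuffers = 1024/16 + 10 = 74, hidirtybuffers =
 * 1024/8 + 20 = 148, lofreebuffers = 1024/18 + 5 = 61, and
 * hifreebuffers = 2 * 61 = 122.
 */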
319
320	kvafreespace = 0;
321
322	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
323	bogus_page = vm_page_alloc(kernel_object,
324			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
325			VM_ALLOC_NORMAL);
326
327}
328
329/*
330 * Free the kva allocation for a buffer
331 * Must be called only at splbio or higher,
332 *  as this is the only locking for buffer_map.
333 */
334static void
335bfreekva(struct buf * bp)
336{
337	if (bp->b_kvasize) {
338		vm_map_delete(buffer_map,
339		    (vm_offset_t) bp->b_kvabase,
340		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
341		);
342		bp->b_kvasize = 0;
343		kvaspacewakeup();
344	}
345}
346
347/*
348 *	bremfree:
349 *
350 *	Remove the buffer from the appropriate free list.
351 */
352void
353bremfree(struct buf * bp)
354{
355	int s = splbio();
356	int old_qindex = bp->b_qindex;
357
358	if (bp->b_qindex != QUEUE_NONE) {
359		if (bp->b_qindex == QUEUE_EMPTY) {
360			kvafreespace -= bp->b_kvasize;
361		}
362		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
363		bp->b_qindex = QUEUE_NONE;
364		runningbufspace += bp->b_bufsize;
365	} else {
366#if !defined(MAX_PERF)
367		panic("bremfree: removing a buffer when not on a queue");
368#endif
369	}
370
371	/*
372	 * Fixup numfreebuffers count.  If the buffer is invalid or not
373	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
374	 * the buffer was free and we must decrement numfreebuffers.
375	 */
376	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
377		switch(old_qindex) {
378		case QUEUE_EMPTY:
379		case QUEUE_LRU:
380		case QUEUE_AGE:
381			--numfreebuffers;
382			break;
383		default:
384			break;
385		}
386	}
387	splx(s);
388}
389
390
391/*
392 * Get a buffer with the specified data.  Look in the cache first.  We
393 * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
394 * is set, the buffer is valid and we do not have to do anything ( see
395 * getblk() ).
396 */
397int
398bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
399    struct buf ** bpp)
400{
401	struct buf *bp;
402
403	bp = getblk(vp, blkno, size, 0, 0);
404	*bpp = bp;
405
406	/* if not found in cache, do some I/O */
407	if ((bp->b_flags & B_CACHE) == 0) {
408		if (curproc != NULL)
409			curproc->p_stats->p_ru.ru_inblock++;
410		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
411		bp->b_flags |= B_READ;
412		bp->b_flags &= ~(B_ERROR | B_INVAL);
413		if (bp->b_rcred == NOCRED) {
414			if (cred != NOCRED)
415				crhold(cred);
416			bp->b_rcred = cred;
417		}
418		vfs_busy_pages(bp, 0);
419		VOP_STRATEGY(vp, bp);
420		return (biowait(bp));
421	}
422	return (0);
423}
424
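/*
 * Illustrative caller sketch (not part of this file): a filesystem
 * read path typically pulls a block through the cache with bread()
 * and releases it with bqrelse() (keep cached) or brelse() (discard
 * on error).  "vp", "blkno", "bsize" and "dst" are caller-supplied
 * names used only in this example.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, blkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_data, dst, bsize);
 *	bqrelse(bp);
 */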
425/*
426 * Operates like bread, but also starts asynchronous I/O on
427 * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
428 * to initiating I/O . If B_CACHE is set, the buffer is valid
429 * and we do not have to do anything.
430 */
431int
432breadn(struct vnode * vp, daddr_t blkno, int size,
433    daddr_t * rablkno, int *rabsize,
434    int cnt, struct ucred * cred, struct buf ** bpp)
435{
436	struct buf *bp, *rabp;
437	int i;
438	int rv = 0, readwait = 0;
439
440	*bpp = bp = getblk(vp, blkno, size, 0, 0);
441
442	/* if not found in cache, do some I/O */
443	if ((bp->b_flags & B_CACHE) == 0) {
444		if (curproc != NULL)
445			curproc->p_stats->p_ru.ru_inblock++;
446		bp->b_flags |= B_READ;
447		bp->b_flags &= ~(B_ERROR | B_INVAL);
448		if (bp->b_rcred == NOCRED) {
449			if (cred != NOCRED)
450				crhold(cred);
451			bp->b_rcred = cred;
452		}
453		vfs_busy_pages(bp, 0);
454		VOP_STRATEGY(vp, bp);
455		++readwait;
456	}
457
458	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
459		if (inmem(vp, *rablkno))
460			continue;
461		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
462
463		if ((rabp->b_flags & B_CACHE) == 0) {
464			if (curproc != NULL)
465				curproc->p_stats->p_ru.ru_inblock++;
466			rabp->b_flags |= B_READ | B_ASYNC;
467			rabp->b_flags &= ~(B_ERROR | B_INVAL);
468			if (rabp->b_rcred == NOCRED) {
469				if (cred != NOCRED)
470					crhold(cred);
471				rabp->b_rcred = cred;
472			}
473			vfs_busy_pages(rabp, 0);
474			VOP_STRATEGY(vp, rabp);
475		} else {
476			brelse(rabp);
477		}
478	}
479
480	if (readwait) {
481		rv = biowait(bp);
482	}
483	return (rv);
484}
485
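/*
 * Illustrative caller sketch (not part of this file): breadn() takes
 * parallel arrays of read-ahead block numbers and sizes.  This reuses
 * the caller-supplied locals from the bread() sketch above; "nextblk"
 * and "rasize" are assumed locals for this example.
 *
 *	daddr_t nextblk = blkno + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, blkno, bsize, &nextblk, &rasize, 1, NOCRED, &bp);
 */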
486/*
487 * Write, release buffer on completion.  (Done by iodone
488 * if async).  Do not bother writing anything if the buffer
489 * is invalid.
490 *
491 * Note that we set B_CACHE here, indicating that buffer is
492 * fully valid and thus cacheable.  This is true even of NFS
493 * now so we set it generally.  This could be set either here
494 * or in biodone() since the I/O is synchronous.  We put it
495 * here.
496 */
497int
498bwrite(struct buf * bp)
499{
500	int oldflags, s;
501	struct vnode *vp;
502	struct mount *mp;
503
504	if (bp->b_flags & B_INVAL) {
505		brelse(bp);
506		return (0);
507	}
508
509	oldflags = bp->b_flags;
510
511#if !defined(MAX_PERF)
512	if ((bp->b_flags & B_BUSY) == 0)
513		panic("bwrite: buffer is not busy???");
514#endif
515	s = splbio();
516	bundirty(bp);
517
518	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
519	bp->b_flags |= B_WRITEINPROG | B_CACHE;
520
521	bp->b_vp->v_numoutput++;
522	vfs_busy_pages(bp, 1);
523	if (curproc != NULL)
524		curproc->p_stats->p_ru.ru_oublock++;
525	splx(s);
526	VOP_STRATEGY(bp->b_vp, bp);
527
528	/*
529	 * Collect statistics on synchronous and asynchronous writes.
530	 * Writes to block devices are charged to their associated
531	 * filesystem (if any).
532	 */
533	if ((vp = bp->b_vp) != NULL) {
534		if (vp->v_type == VBLK)
535			mp = vp->v_specmountpoint;
536		else
537			mp = vp->v_mount;
538		if (mp != NULL) {
539			if ((oldflags & B_ASYNC) == 0)
540				mp->mnt_stat.f_syncwrites++;
541			else
542				mp->mnt_stat.f_asyncwrites++;
543		}
544	}
545
546	if ((oldflags & B_ASYNC) == 0) {
547		int rtval = biowait(bp);
548		brelse(bp);
549		return (rtval);
550	}
551
552	return (0);
553}
554
555/*
556 * Delayed write. (Buffer is marked dirty).  Do not bother writing
557 * anything if the buffer is marked invalid.
558 *
559 * Note that since the buffer must be completely valid, we can safely
560 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
561 * biodone() in order to prevent getblk from writing the buffer
562 * out synchronously.
563 */
564void
565bdwrite(struct buf * bp)
566{
567	struct vnode *vp;
568
569#if !defined(MAX_PERF)
570	if ((bp->b_flags & B_BUSY) == 0) {
571		panic("bdwrite: buffer is not busy");
572	}
573#endif
574
575	if (bp->b_flags & B_INVAL) {
576		brelse(bp);
577		return;
578	}
579	bdirty(bp);
580
581	/*
582	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
583	 * true even of NFS now.
584	 */
585	bp->b_flags |= B_CACHE;
586
587	/*
588	 * This bmap keeps the system from needing to do the bmap later,
589	 * perhaps when the system is attempting to do a sync.  Since the
590	 * indirect block -- or whatever other data structure the filesystem
591	 * needs -- is likely still in memory now, this is a good time to
592	 * do it.  Note also that if the pageout daemon is
593	 * requesting a sync -- there might not be enough memory to do
594	 * the bmap then...  So, this is important to do.
595	 */
596	if (bp->b_lblkno == bp->b_blkno) {
597		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
598	}
599
600	/*
601	 * Set the *dirty* buffer range based upon the VM system dirty pages.
602	 */
603	vfs_setdirty(bp);
604
605	/*
606	 * We need to do this here to satisfy the vnode_pager and the
607	 * pageout daemon, so that it thinks that the pages have been
608	 * "cleaned".  Note that since the pages are in a delayed write
609	 * buffer -- the VFS layer "will" see that the pages get written
610	 * out on the next sync, or perhaps the cluster will be completed.
611	 */
612	vfs_clean_pages(bp);
613	bqrelse(bp);
614
615	/*
616	 * XXX The soft dependency code is not prepared to
617	 * have I/O done when a bdwrite is requested. For
618	 * now we just let the write be delayed if it is
619	 * requested by the soft dependency code.
620	 */
621	if ((vp = bp->b_vp) &&
622	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
623		  (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
624		 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
625		return;
626
627	if (numdirtybuffers >= hidirtybuffers)
628		flushdirtybuffers(0, 0);
629}
630
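/*
 * Illustrative caller sketch (not part of this file): metadata updates
 * commonly dirty a buffer with bdwrite() rather than writing it
 * synchronously, leaving the actual write to the syncer.  "vp",
 * "blkno" and "bsize" are caller-supplied in this example.
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, blkno, bsize, 0, 0);
 *	... fill or modify bp->b_data ...
 *	bdwrite(bp);
 */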
631/*
632 *	bdirty:
633 *
634 *	Turn buffer into delayed write request.  We must clear B_READ and
635 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
636 *	itself to properly update it in the dirty/clean lists.  We mark it
637 *	B_DONE to ensure that any asynchronization of the buffer properly
638 *	clears B_DONE ( else a panic will occur later ).
639 *
640 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
641 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
642 *	should only be called if the buffer is known-good.
643 *
644 *	Since the buffer is not on a queue, we do not update the numfreebuffers
645 *	count.
646 *
647 *	Must be called at splbio().
648 *	The buffer must be on QUEUE_NONE.
649 */
650void
651bdirty(bp)
652	struct buf *bp;
653{
654	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
655	bp->b_flags &= ~(B_READ|B_RELBUF);
656
657	if ((bp->b_flags & B_DELWRI) == 0) {
658		bp->b_flags |= B_DONE | B_DELWRI;
659		reassignbuf(bp, bp->b_vp);
660		++numdirtybuffers;
661	}
662}
663
664/*
665 *	bundirty:
666 *
667 *	Clear B_DELWRI for buffer.
668 *
669 *	Since the buffer is not on a queue, we do not update the numfreebuffers
670 *	count.
671 *
672 *	Must be called at splbio().
673 *	The buffer must be on QUEUE_NONE.
674 */
675
676void
677bundirty(bp)
678	struct buf *bp;
679{
680	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
681
682	if (bp->b_flags & B_DELWRI) {
683		bp->b_flags &= ~B_DELWRI;
684		reassignbuf(bp, bp->b_vp);
685		--numdirtybuffers;
686	}
687}
688
689/*
690 *	bawrite:
691 *
692 *	Asynchronous write.  Start output on a buffer, but do not wait for
693 *	it to complete.  The buffer is released when the output completes.
694 *
695 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
696 *	B_INVAL buffers.  Not us.
697 */
698void
699bawrite(struct buf * bp)
700{
701	bp->b_flags |= B_ASYNC;
702	(void) VOP_BWRITE(bp);
703}
704
705/*
706 *	bowrite:
707 *
708 *	Ordered write.  Start output on a buffer, and flag it so that the
709 *	device will write it in the order it was queued.  The buffer is
710 *	released when the output completes.  bwrite() ( or the VOP routine
711 *	anyway ) is responsible for handling B_INVAL buffers.
712 */
713int
714bowrite(struct buf * bp)
715{
716	bp->b_flags |= B_ORDERED | B_ASYNC;
717	return (VOP_BWRITE(bp));
718}
719
720/*
721 *	brelse:
722 *
723 *	Release a busy buffer and, if requested, free its resources.  The
724 *	buffer will be stashed in the appropriate bufqueue[] allowing it
725 *	to be accessed later as a cache entity or reused for other purposes.
726 */
727void
728brelse(struct buf * bp)
729{
730	int s;
731
732	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
733
734#if 0
735	if (bp->b_flags & B_CLUSTER) {
736		relpbuf(bp, NULL);
737		return;
738	}
739#endif
740
741	s = splbio();
742
743	if (bp->b_flags & B_LOCKED)
744		bp->b_flags &= ~B_ERROR;
745
746	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
747		/*
748		 * Failed write, redirty.  Must clear B_ERROR to prevent
749		 * pages from being scrapped.  Note: B_INVAL is ignored
750		 * here but will presumably be dealt with later.
751		 */
752		bp->b_flags &= ~B_ERROR;
753		bdirty(bp);
754	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
755	    (bp->b_bufsize <= 0)) {
756		/*
757		 * Either a failed I/O or we were asked to free or not
758		 * cache the buffer.
759		 */
760		bp->b_flags |= B_INVAL;
761		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
762			(*bioops.io_deallocate)(bp);
763		if (bp->b_flags & B_DELWRI)
764			--numdirtybuffers;
765		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
766		if ((bp->b_flags & B_VMIO) == 0) {
767			if (bp->b_bufsize)
768				allocbuf(bp, 0);
769			if (bp->b_vp)
770				brelvp(bp);
771		}
772	}
773
774	/*
775	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
776	 * is called with B_DELWRI set, the underlying pages may wind up
777	 * getting freed causing a previous write (bdwrite()) to get 'lost'
778	 * because pages associated with a B_DELWRI bp are marked clean.
779	 *
780	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
781	 * if B_DELWRI is set.
782	 */
783
784	if (bp->b_flags & B_DELWRI)
785		bp->b_flags &= ~B_RELBUF;
786
787	/*
788	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
789	 * constituted, not even NFS buffers now.  Two flags affect this.  If
790	 * B_INVAL, the struct buf is invalidated but the VM object is kept
791	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
792	 *
793	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
794	 * invalidated.  B_ERROR cannot be set for a failed write unless the
795	 * buffer is also B_INVAL because it hits the re-dirtying code above.
796	 *
797	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
798	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
799	 * the commit state and we cannot afford to lose the buffer.
800	 */
801	if ((bp->b_flags & B_VMIO)
802	    && !(bp->b_vp->v_tag == VT_NFS &&
803		 bp->b_vp->v_type != VBLK &&
804		 (bp->b_flags & B_DELWRI))
805	    ) {
806
807		int i, j, resid;
808		vm_page_t m;
809		off_t foff;
810		vm_pindex_t poff;
811		vm_object_t obj;
812		struct vnode *vp;
813
814		vp = bp->b_vp;
815
816		/*
817		 * Get the base offset and length of the buffer.  Note that
818		 * for block sizes that are less than PAGE_SIZE, the b_data
819		 * base of the buffer does not represent exactly b_offset and
820		 * neither b_offset nor b_size are necessarily page aligned.
821		 * Instead, the starting position of b_offset is:
822		 *
823		 * 	b_data + (b_offset & PAGE_MASK)
824		 *
825		 * block sizes less than DEV_BSIZE (usually 512) are not
826		 * supported due to the page granularity bits (m->valid,
827		 * m->dirty, etc...).
828		 *
829		 * See man buf(9) for more information
830		 */
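		/*
		 * Illustrative example (values assumed): with 4K pages and
		 * b_offset == 0x3800, the data for b_offset begins at
		 * b_data + (0x3800 & PAGE_MASK), i.e. 0x800 bytes past b_data.
		 */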
831
832		resid = bp->b_bufsize;
833		foff = bp->b_offset;
834
835		for (i = 0; i < bp->b_npages; i++) {
836			m = bp->b_pages[i];
837			vm_page_flag_clear(m, PG_ZERO);
838			if (m == bogus_page) {
839
840				obj = (vm_object_t) vp->v_object;
841				poff = OFF_TO_IDX(bp->b_offset);
842
843				for (j = i; j < bp->b_npages; j++) {
844					m = bp->b_pages[j];
845					if (m == bogus_page) {
846						m = vm_page_lookup(obj, poff + j);
847#if !defined(MAX_PERF)
848						if (!m) {
849							panic("brelse: page missing\n");
850						}
851#endif
852						bp->b_pages[j] = m;
853					}
854				}
855
856				if ((bp->b_flags & B_INVAL) == 0) {
857					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
858				}
859			}
860			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
861				int poffset = foff & PAGE_MASK;
862				int presid = resid > (PAGE_SIZE - poffset) ?
863					(PAGE_SIZE - poffset) : resid;
864
865				KASSERT(presid >= 0, ("brelse: extra page"));
866				vm_page_set_invalid(m, poffset, presid);
867			}
868			resid -= PAGE_SIZE - (foff & PAGE_MASK);
869			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
870		}
871
872		if (bp->b_flags & (B_INVAL | B_RELBUF))
873			vfs_vmio_release(bp);
874
875	} else if (bp->b_flags & B_VMIO) {
876
877		if (bp->b_flags & (B_INVAL | B_RELBUF))
878			vfs_vmio_release(bp);
879
880	}
881
882#if !defined(MAX_PERF)
883	if (bp->b_qindex != QUEUE_NONE)
884		panic("brelse: free buffer onto another queue???");
885#endif
886	/* enqueue */
887
888	/* buffers with no memory */
889	if (bp->b_bufsize == 0) {
890		bp->b_flags |= B_INVAL;
891		bp->b_qindex = QUEUE_EMPTY;
892		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
893		LIST_REMOVE(bp, b_hash);
894		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
895		bp->b_dev = NODEV;
896		kvafreespace += bp->b_kvasize;
897		if (bp->b_kvasize)
898			kvaspacewakeup();
899	/* buffers with junk contents */
900	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
901		bp->b_flags |= B_INVAL;
902		bp->b_qindex = QUEUE_AGE;
903		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
904		LIST_REMOVE(bp, b_hash);
905		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
906		bp->b_dev = NODEV;
907
908	/* buffers that are locked */
909	} else if (bp->b_flags & B_LOCKED) {
910		bp->b_qindex = QUEUE_LOCKED;
911		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
912
913	/* buffers with stale but valid contents */
914	} else if (bp->b_flags & B_AGE) {
915		bp->b_qindex = QUEUE_AGE;
916		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
917
918	/* buffers with valid and quite potentially reuseable contents */
919	} else {
920		bp->b_qindex = QUEUE_LRU;
921		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
922	}
923
924	/*
925	 * If B_INVAL, clear B_DELWRI.
926	 */
927	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
928		bp->b_flags &= ~B_DELWRI;
929		--numdirtybuffers;
930	}
931
932	runningbufspace -= bp->b_bufsize;
933
934	/*
935	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
936	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
937	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
938	 * if B_INVAL is set ).
939	 */
940
941	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
942		bufcountwakeup();
943
944	/*
945	 * Something we can maybe free.
946	 */
947
948	if (bp->b_bufsize)
949		bufspacewakeup();
950
951	if (bp->b_flags & B_WANTED) {
952		bp->b_flags &= ~(B_WANTED | B_AGE);
953		wakeup(bp);
954	}
955
956	/* unlock */
957	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
958		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
959	splx(s);
960}
961
962/*
963 * Release a buffer back to the appropriate queue but do not try to free
964 * it.
965 *
966 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
967 * biodone() to requeue an async I/O on completion.  It is also used when
968 * known good buffers need to be requeued but we think we may need the data
969 * again soon.
970 */
971void
972bqrelse(struct buf * bp)
973{
974	int s;
975
976	s = splbio();
977
978	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
979
980#if !defined(MAX_PERF)
981	if (bp->b_qindex != QUEUE_NONE)
982		panic("bqrelse: free buffer onto another queue???");
983#endif
984	if (bp->b_flags & B_LOCKED) {
985		bp->b_flags &= ~B_ERROR;
986		bp->b_qindex = QUEUE_LOCKED;
987		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
988		/* buffers with stale but valid contents */
989	} else {
990		bp->b_qindex = QUEUE_LRU;
991		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
992	}
993
994	runningbufspace -= bp->b_bufsize;
995
996	if ((bp->b_flags & B_LOCKED) == 0 &&
997	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
998	) {
999		bufcountwakeup();
1000	}
1001
1002	/*
1003	 * Something we can maybe wakeup
1004	 */
1005	if (bp->b_bufsize)
1006		bufspacewakeup();
1007
1008	/* anyone need this block? */
1009	if (bp->b_flags & B_WANTED) {
1010		bp->b_flags &= ~(B_WANTED | B_AGE);
1011		wakeup(bp);
1012	}
1013
1014	/* unlock */
1015	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
1016		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1017	splx(s);
1018}
1019
1020static void
1021vfs_vmio_release(bp)
1022	struct buf *bp;
1023{
1024	int i, s;
1025	vm_page_t m;
1026
1027	s = splvm();
1028	for (i = 0; i < bp->b_npages; i++) {
1029		m = bp->b_pages[i];
1030		bp->b_pages[i] = NULL;
1031		/*
1032		 * In order to keep page LRU ordering consistent, put
1033		 * everything on the inactive queue.
1034		 */
1035		vm_page_unwire(m, 0);
1036		/*
1037		 * We don't mess with busy pages, it is
1038		 * the responsibility of the process that
1039		 * busied the pages to deal with them.
1040		 */
1041		if ((m->flags & PG_BUSY) || (m->busy != 0))
1042			continue;
1043
1044		if (m->wire_count == 0) {
1045			vm_page_flag_clear(m, PG_ZERO);
1046			/*
1047			 * Might as well free the page if we can and it has
1048			 * no valid data.
1049			 */
1050			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
1051				vm_page_busy(m);
1052				vm_page_protect(m, VM_PROT_NONE);
1053				vm_page_free(m);
1054			}
1055		}
1056	}
1057	bufspace -= bp->b_bufsize;
1058	vmiospace -= bp->b_bufsize;
1059	runningbufspace -= bp->b_bufsize;
1060	splx(s);
1061	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
1062	if (bp->b_bufsize)
1063		bufspacewakeup();
1064	bp->b_npages = 0;
1065	bp->b_bufsize = 0;
1066	bp->b_flags &= ~B_VMIO;
1067	if (bp->b_vp)
1068		brelvp(bp);
1069}
1070
1071/*
1072 * Check to see if a block is currently memory resident.
1073 */
1074struct buf *
1075gbincore(struct vnode * vp, daddr_t blkno)
1076{
1077	struct buf *bp;
1078	struct bufhashhdr *bh;
1079
1080	bh = BUFHASH(vp, blkno);
1081	bp = bh->lh_first;
1082
1083	/* Search hash chain */
1084	while (bp != NULL) {
1085		/* hit */
1086		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
1087		    (bp->b_flags & B_INVAL) == 0) {
1088			break;
1089		}
1090		bp = bp->b_hash.le_next;
1091	}
1092	return (bp);
1093}
1094
1095/*
1096 * this routine implements clustered async writes for
1097 * clearing out B_DELWRI buffers...  This is much better
1098 * than the old way of writing only one buffer at a time.
1099 */
1100int
1101vfs_bio_awrite(struct buf * bp)
1102{
1103	int i;
1104	daddr_t lblkno = bp->b_lblkno;
1105	struct vnode *vp = bp->b_vp;
1106	int s;
1107	int ncl;
1108	struct buf *bpa;
1109	int nwritten;
1110	int size;
1111	int maxcl;
1112
1113	s = splbio();
1114	/*
1115	 * right now we support clustered writing only to regular files, and
1116	 * then only if our I/O system is not saturated.
1117	 */
1118	if ((vp->v_type == VREG) &&
1119	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1120	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1121
1122		size = vp->v_mount->mnt_stat.f_iosize;
1123		maxcl = MAXPHYS / size;
1124
1125		for (i = 1; i < maxcl; i++) {
1126			if ((bpa = gbincore(vp, lblkno + i)) &&
1127			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1128			    (B_DELWRI | B_CLUSTEROK)) &&
1129			    (bpa->b_bufsize == size)) {
1130				if ((bpa->b_blkno == bpa->b_lblkno) ||
1131				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
1132					break;
1133			} else {
1134				break;
1135			}
1136		}
1137		ncl = i;
1138		/*
1139		 * this is a possible cluster write
1140		 */
1141		if (ncl != 1) {
1142			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
1143			splx(s);
1144			return nwritten;
1145		}
1146	}
1147
1148	bremfree(bp);
1149	bp->b_flags |= B_BUSY | B_ASYNC;
1150
1151	splx(s);
1152	/*
1153	 * default (old) behavior, writing out only one block
1154	 *
1155	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1156	 */
1157	nwritten = bp->b_bufsize;
1158	(void) VOP_BWRITE(bp);
1159
1160	return nwritten;
1161}
1162
1163/*
1164 *	getnewbuf:
1165 *
1166 *	Find and initialize a new buffer header, freeing up existing buffers
1167 *	in the bufqueues as necessary.  The new buffer is returned with
1168 *	flags set to B_BUSY.
1169 *
1170 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1171 *	buffer away, the caller must set B_INVAL prior to calling brelse().
1172 *
1173 *	We block if:
1174 *		We have insufficient buffer headers
1175 *		We have insufficient buffer space
1176 *		buffer_map is too fragmented ( space reservation fails )
1177 *
1178 *	We do *not* attempt to flush dirty buffers more than one level deep.
1179 *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1180 *
1181 *	If P_FLSINPROG is set, we are allowed to dip into our emergency
1182 *	reserve.
1183 */
1184static struct buf *
1185getnewbuf(struct vnode *vp, daddr_t blkno,
1186	int slpflag, int slptimeo, int size, int maxsize)
1187{
1188	struct buf *bp;
1189	struct buf *nbp;
1190	struct buf *dbp;
1191	int outofspace;
1192	int nqindex;
1193	int defrag = 0;
1194
1195restart:
1196	/*
1197	 * Calculate whether we are out of buffer space.  This state is
1198	 * recalculated on every restart.  If we are out of space, we
1199	 * have to turn off defragmentation.  The outofspace code will
1200	 * defragment too, but the looping conditionals will be messed up
1201	 * if both outofspace and defrag are on.
1202	 */
1203
1204	dbp = NULL;
1205	outofspace = 0;
1206	if (bufspace >= hibufspace) {
1207		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
1208		    bufspace >= maxbufspace
1209		) {
1210			outofspace = 1;
1211			defrag = 0;
1212		}
1213	}
1214
1215	/*
1216	 * defrag state is semi-persistent.  1 means we are flagged for
1217	 * defragging.  -1 means we actually defragged something.
1218	 */
1219	/* nop */
1220
1221	/*
1222	 * Setup for scan.  If we do not have enough free buffers,
1223	 * we setup a degenerate case that falls through the while.
1224	 *
1225	 * If we are in the middle of a flush, we can dip into the
1226	 * emergency reserve.
1227	 *
1228	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
1229	 * because those buffers are, well, empty.
1230	 */
1231
1232	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
1233	    numfreebuffers < lofreebuffers
1234	) {
1235		nqindex = QUEUE_LRU;
1236		nbp = NULL;
1237	} else {
1238		nqindex = QUEUE_EMPTY;
1239		if (outofspace ||
1240		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL
1241		) {
1242			nqindex = QUEUE_AGE;
1243			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1244			if (nbp == NULL) {
1245				nqindex = QUEUE_LRU;
1246				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1247			}
1248		}
1249	}
1250
1251	/*
1252	 * Run scan, possibly freeing data and/or kva mappings on the fly
1253	 * depending.
1254	 */
1255
1256	while ((bp = nbp) != NULL) {
1257		int qindex = nqindex;
1258		/*
1259		 * Calculate next bp ( we can only use it if we do not block
1260		 * or do other fancy things ).
1261		 */
1262		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1263			switch(qindex) {
1264			case QUEUE_EMPTY:
1265				nqindex = QUEUE_AGE;
1266				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
1267					break;
1268				/* fall through */
1269			case QUEUE_AGE:
1270				nqindex = QUEUE_LRU;
1271				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
1272					break;
1273				/* fall through */
1274			case QUEUE_LRU:
1275				/*
1276				 * nbp is NULL.
1277				 */
1278				break;
1279			}
1280		}
1281
1282		/*
1283		 * Sanity Checks
1284		 */
1285		KASSERT(!(bp->b_flags & B_BUSY), ("getnewbuf: busy buffer %p on free list", bp));
1286		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1287
1288		/*
1289		 * Here we try to move NON VMIO buffers to the end of the
1290		 * LRU queue in order to make VMIO buffers more readily
1291		 * freeable.  We also try to move buffers with a positive
1292		 * usecount to the end.
1293		 *
1294		 * Note that by moving the bp to the end, we setup a following
1295		 * loop.  Since we continue to decrement b_usecount this
1296		 * is ok and, in fact, desireable.
1297		 * is ok and, in fact, desirable.
1298		 * If we are at the end of the list, we move ourself to the
1299		 * same place and need to fixup nbp and nqindex to handle
1300		 * the following case.
1301		 */
1302
1303		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
1304			if ((bp->b_flags & B_VMIO) == 0 ||
1305			    (vmiospace < maxvmiobufspace)
1306			) {
1307				--bp->b_usecount;
1308				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1309				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1310				if (nbp == NULL) {
1311					nqindex = qindex;
1312					nbp = bp;
1313				}
1314				continue;
1315			}
1316		}
1317
1318		/*
1319		 * If we come across a delayed write and numdirtybuffers should
1320		 * be flushed, try to write it out.  Only if P_FLSINPROG is
1321		 * not set.  We can't afford to recursively stack more than
1322		 * one deep due to the possibility of having deep VFS call
1323		 * stacks.
1324		 *
1325		 * Limit the number of dirty buffers we are willing to try
1326		 * to recover since it really isn't our job here.
1327		 */
1328		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1329			/*
1330			 * This is rather complex, but necessary.  If we come
1331			 * across a B_DELWRI buffer we have to flush it in
1332			 * order to use it.  We only do this if we absolutely
1333			 * need to.  We must also protect against too much
1334			 * recursion which might run us out of stack due to
1335			 * deep VFS call stacks.
1336			 *
1337			 * In heavy-writing situations, QUEUE_LRU can contain
1338			 * a large number of DELWRI buffers at its head.  These
1339			 * buffers must be moved to the tail if they cannot be
1340			 * written async in order to reduce the scanning time
1341			 * required to skip past these buffers in later
1342			 * getnewbuf() calls.
1343			 */
1344			if ((curproc->p_flag & P_FLSINPROG) ||
1345			    numdirtybuffers < hidirtybuffers
1346			) {
1347				if (qindex == QUEUE_LRU) {
1348					/*
1349					 * dbp prevents us from looping forever
1350					 * if all bps in QUEUE_LRU are dirty.
1351					 */
1352					if (bp == dbp) {
1353						bp = NULL;
1354						break;
1355					}
1356					if (dbp == NULL)
1357						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
1358					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1359					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1360				}
1361				continue;
1362			}
1363			curproc->p_flag |= P_FLSINPROG;
1364			vfs_bio_awrite(bp);
1365			curproc->p_flag &= ~P_FLSINPROG;
1366			goto restart;
1367		}
1368
1369		if (defrag > 0 && bp->b_kvasize == 0)
1370			continue;
1371		if (outofspace > 0 && bp->b_bufsize == 0)
1372			continue;
1373
1374		/*
1375		 * Start freeing the bp.  This is somewhat involved.  nbp
1376		 * remains valid only for QUEUE_EMPTY bp's.
1377		 */
1378
1379		bremfree(bp);
1380		bp->b_flags |= B_BUSY;
1381
1382		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
1383			if (bp->b_flags & B_VMIO) {
1384				bp->b_flags &= ~B_ASYNC;
1385				vfs_vmio_release(bp);
1386			}
1387			if (bp->b_vp)
1388				brelvp(bp);
1389		}
1390
1391		if (bp->b_flags & B_WANTED) {
1392			bp->b_flags &= ~B_WANTED;
1393			wakeup(bp);
1394		}
1395
1396		/*
1397		 * NOTE:  nbp is now entirely invalid.  We can only restart
1398		 * the scan from this point on.
1399		 *
1400		 * Get the rest of the buffer freed up.  b_kva* is still
1401		 * valid after this operation.
1402		 */
1403
1404		if (bp->b_rcred != NOCRED) {
1405			crfree(bp->b_rcred);
1406			bp->b_rcred = NOCRED;
1407		}
1408		if (bp->b_wcred != NOCRED) {
1409			crfree(bp->b_wcred);
1410			bp->b_wcred = NOCRED;
1411		}
1412		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1413			(*bioops.io_deallocate)(bp);
1414
1415		LIST_REMOVE(bp, b_hash);
1416		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1417
1418		if (bp->b_bufsize)
1419			allocbuf(bp, 0);
1420
1421		bp->b_flags = B_BUSY;
1422		bp->b_dev = NODEV;
1423		bp->b_vp = NULL;
1424		bp->b_blkno = bp->b_lblkno = 0;
1425		bp->b_offset = NOOFFSET;
1426		bp->b_iodone = 0;
1427		bp->b_error = 0;
1428		bp->b_resid = 0;
1429		bp->b_bcount = 0;
1430		bp->b_npages = 0;
1431		bp->b_dirtyoff = bp->b_dirtyend = 0;
1432		bp->b_usecount = 5;
1433
1434		LIST_INIT(&bp->b_dep);
1435
1436		/*
1437		 * Ok, now that we have a free buffer, if we are defragging
1438		 * we have to recover the kvaspace.
1439		 */
1440
1441		if (defrag > 0) {
1442			defrag = -1;
1443			bp->b_flags |= B_INVAL;
1444			bfreekva(bp);
1445			brelse(bp);
1446			goto restart;
1447		}
1448
1449		if (outofspace > 0) {
1450			outofspace = -1;
1451			bp->b_flags |= B_INVAL;
1452			bfreekva(bp);
1453			brelse(bp);
1454			goto restart;
1455		}
1456
1457		/*
1458		 * We are done
1459		 */
1460		break;
1461	}
1462
1463	/*
1464	 * If we exhausted our list, sleep as appropriate.
1465	 */
1466
1467	if (bp == NULL) {
1468		int flags;
1469
1470dosleep:
1471		if (defrag > 0)
1472			flags = VFS_BIO_NEED_KVASPACE;
1473		else if (outofspace > 0)
1474			flags = VFS_BIO_NEED_BUFSPACE;
1475		else
1476			flags = VFS_BIO_NEED_ANY;
1477
1478		if (rushjob < syncdelay / 2)
1479			++rushjob;
1480		needsbuffer |= flags;
1481		while (needsbuffer & flags) {
1482			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1483			    "newbuf", slptimeo))
1484				return (NULL);
1485		}
1486	} else {
1487		/*
1488		 * We finally have a valid bp.  We aren't quite out of the
1489		 * woods, we still have to reserve kva space.
1490		 */
1491		vm_offset_t addr = 0;
1492
1493		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1494
1495		if (maxsize != bp->b_kvasize) {
1496			bfreekva(bp);
1497
1498			if (vm_map_findspace(buffer_map,
1499				vm_map_min(buffer_map), maxsize, &addr)
1500			) {
1501				/*
1502				 * Uh oh.  Buffer map is too fragmented.  Try
1503				 * to defragment.
1504				 */
1505				if (defrag <= 0) {
1506					defrag = 1;
1507					bp->b_flags |= B_INVAL;
1508					brelse(bp);
1509					goto restart;
1510				}
1511				/*
1512				 * Uh oh.  We couldn't seem to defragment
1513				 */
1514				bp = NULL;
1515				goto dosleep;
1516			}
1517		}
1518		if (addr) {
1519			vm_map_insert(buffer_map, NULL, 0,
1520				addr, addr + maxsize,
1521				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1522
1523			bp->b_kvabase = (caddr_t) addr;
1524			bp->b_kvasize = maxsize;
1525		}
1526		bp->b_data = bp->b_kvabase;
1527	}
1528
1529	/*
1530	 * The bp, if valid, is set to B_BUSY.
1531	 */
1532	return (bp);
1533}
1534
1535/*
1536 *	waitfreebuffers:
1537 *
1538 *	Wait for sufficient free buffers.  This routine is not called if
1539 *	curproc is the update process so we do not have to do anything
1540 *	fancy.
1541 */
1542
1543static void
1544waitfreebuffers(int slpflag, int slptimeo)
1545{
1546	while (numfreebuffers < hifreebuffers) {
1547		flushdirtybuffers(slpflag, slptimeo);
1548		if (numfreebuffers < hifreebuffers)
1549			break;
1550		needsbuffer |= VFS_BIO_NEED_FREE;
1551		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1552			break;
1553	}
1554}
1555
1556/*
1557 *	flushdirtybuffers:
1558 *
1559 *	This routine is called when we get too many dirty buffers.
1560 *
1561 *	We have to protect ourselves from recursion, but we also do not want
1562 *	other processes' flushdirtybuffers() to interfere with the syncer if
1563 *	it decides to flushdirtybuffers().
1564 *
1565 *	In order to maximize operations, we allow any process to flush
1566 *	dirty buffers and use P_FLSINPROG to prevent recursion.
1567 */
1568
1569static void
1570flushdirtybuffers(int slpflag, int slptimeo)
1571{
1572	int s;
1573
1574	s = splbio();
1575
1576	if (curproc->p_flag & P_FLSINPROG) {
1577		splx(s);
1578		return;
1579	}
1580	curproc->p_flag |= P_FLSINPROG;
1581
1582	while (numdirtybuffers > lodirtybuffers) {
1583		if (flushbufqueues() == 0)
1584			break;
1585	}
1586
1587	curproc->p_flag &= ~P_FLSINPROG;
1588
1589	splx(s);
1590}
1591
1592static int
1593flushbufqueues(void)
1594{
1595	struct buf *bp;
1596	int qindex;
1597	int r = 0;
1598
1599	qindex = QUEUE_AGE;
1600	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1601
1602	for (;;) {
1603		if (bp == NULL) {
1604			if (qindex == QUEUE_LRU)
1605				break;
1606			qindex = QUEUE_LRU;
1607			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1608				break;
1609		}
1610
1611		/*
1612		 * Try to free up B_INVAL delayed-write buffers rather than
1613		 * writing them out.  Note also that NFS is somewhat sensitive
1614		 * to B_INVAL buffers so it is doubly important that we do
1615		 * this.
1616		 */
1617		if ((bp->b_flags & B_DELWRI) != 0) {
1618			if (bp->b_flags & B_INVAL) {
1619				bremfree(bp);
1620				bp->b_flags |= B_BUSY;
1621				brelse(bp);
1622			} else {
1623				vfs_bio_awrite(bp);
1624			}
1625			++r;
1626			break;
1627		}
1628		bp = TAILQ_NEXT(bp, b_freelist);
1629	}
1630	return(r);
1631}
1632
1633/*
1634 * Check to see if a block is currently memory resident.
1635 */
1636struct buf *
1637incore(struct vnode * vp, daddr_t blkno)
1638{
1639	struct buf *bp;
1640
1641	int s = splbio();
1642	bp = gbincore(vp, blkno);
1643	splx(s);
1644	return (bp);
1645}
1646
1647/*
1648 * Returns true if no I/O is needed to access the
1649 * associated VM object.  This is like incore except
1650 * it also hunts around in the VM system for the data.
1651 */
1652
1653int
1654inmem(struct vnode * vp, daddr_t blkno)
1655{
1656	vm_object_t obj;
1657	vm_offset_t toff, tinc, size;
1658	vm_page_t m;
1659	vm_ooffset_t off;
1660
1661	if (incore(vp, blkno))
1662		return 1;
1663	if (vp->v_mount == NULL)
1664		return 0;
1665	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1666		return 0;
1667
1668	obj = vp->v_object;
1669	size = PAGE_SIZE;
1670	if (size > vp->v_mount->mnt_stat.f_iosize)
1671		size = vp->v_mount->mnt_stat.f_iosize;
1672	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
1673
1674	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1675		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1676		if (!m)
1677			return 0;
1678		tinc = size;
1679		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
1680			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
1681		if (vm_page_is_valid(m,
1682		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1683			return 0;
1684	}
1685	return 1;
1686}
1687
1688/*
1689 *	vfs_setdirty:
1690 *
1691 *	Sets the dirty range for a buffer based on the status of the dirty
1692 *	bits in the pages comprising the buffer.
1693 *
1694 *	The range is limited to the size of the buffer.
1695 *
1696 *	This routine is primarily used by NFS, but is generalized for the
1697 *	B_VMIO case.
1698 */
1699static void
1700vfs_setdirty(struct buf *bp)
1701{
1702	int i;
1703	vm_object_t object;
1704
1705	/*
1706	 * Degenerate case - empty buffer
1707	 */
1708
1709	if (bp->b_bufsize == 0)
1710		return;
1711
1712	/*
1713	 * We qualify the scan for modified pages on whether the
1714	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1715	 * is not cleared simply by protecting pages off.
1716	 */
1717
1718	if ((bp->b_flags & B_VMIO) == 0)
1719		return;
1720
1721	object = bp->b_pages[0]->object;
1722
1723	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1724		printf("Warning: object %p writeable but not mightbedirty\n", object);
1725	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1726		printf("Warning: object %p mightbedirty but not writeable\n", object);
1727
1728	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1729		vm_offset_t boffset;
1730		vm_offset_t eoffset;
1731
1732		/*
1733		 * test the pages to see if they have been modified directly
1734		 * by users through the VM system.
1735		 */
1736		for (i = 0; i < bp->b_npages; i++) {
1737			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1738			vm_page_test_dirty(bp->b_pages[i]);
1739		}
1740
1741		/*
1742		 * Calculate the encompassing dirty range, boffset and eoffset,
1743		 * (eoffset - boffset) bytes.
1744		 */
1745
1746		for (i = 0; i < bp->b_npages; i++) {
1747			if (bp->b_pages[i]->dirty)
1748				break;
1749		}
1750		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1751
1752		for (i = bp->b_npages - 1; i >= 0; --i) {
1753			if (bp->b_pages[i]->dirty) {
1754				break;
1755			}
1756		}
1757		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1758
1759		/*
1760		 * Fit it to the buffer.
1761		 */
1762
1763		if (eoffset > bp->b_bcount)
1764			eoffset = bp->b_bcount;
1765
1766		/*
1767		 * If we have a good dirty range, merge with the existing
1768		 * dirty range.
1769		 */
1770
1771		if (boffset < eoffset) {
1772			if (bp->b_dirtyoff > boffset)
1773				bp->b_dirtyoff = boffset;
1774			if (bp->b_dirtyend < eoffset)
1775				bp->b_dirtyend = eoffset;
1776		}
1777	}
1778}
1779
1780/*
1781 *	getblk:
1782 *
1783 *	Get a block given a specified block and offset into a file/device.
1784 *	The buffers B_DONE bit will be cleared on return, making it almost
1785 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
1786 *	return.  The caller should clear B_INVAL prior to initiating a
1787 *	READ.
1788 *
1789 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
1790 *	an existing buffer.
1791 *
1792 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
1793 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
1794 *	and then cleared based on the backing VM.  If the previous buffer is
1795 *	non-0-sized but invalid, B_CACHE will be cleared.
1796 *
1797 *	If getblk() must create a new buffer, the new buffer is returned with
1798 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
1799 *	case it is returned with B_INVAL clear and B_CACHE set based on the
1800 *	backing VM.
1801 *
1802 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whos
1803 *	B_CACHE bit is clear.
1804 *
1805 *	What this means, basically, is that the caller should use B_CACHE to
1806 *	determine whether the buffer is fully valid or not and should clear
1807 *	B_INVAL prior to issuing a read.  If the caller intends to validate
1808 *	the buffer by loading its data area with something, the caller needs
1809 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
1810 *	the caller should set B_CACHE ( as an optimization ), else the caller
1811 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
1812 *	a write attempt or if it was a successful read.  If the caller
1813 *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
1814 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
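 *
 *	Illustrative caller sketch (not from this file) of the B_CACHE
 *	protocol described above; "vp", "blkno" and "bsize" are
 *	caller-supplied, and error handling is omitted.  This mirrors
 *	what bread() does earlier in this file:
 *
 *		bp = getblk(vp, blkno, bsize, 0, 0);
 *		if ((bp->b_flags & B_CACHE) == 0) {
 *			bp->b_flags |= B_READ;
 *			bp->b_flags &= ~(B_INVAL | B_ERROR);
 *			vfs_busy_pages(bp, 0);
 *			VOP_STRATEGY(vp, bp);
 *			error = biowait(bp);
 *		}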
1815 */
1816struct buf *
1817getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1818{
1819	struct buf *bp;
1820	int s;
1821	struct bufhashhdr *bh;
1822
1823#if !defined(MAX_PERF)
1824	if (size > MAXBSIZE)
1825		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1826#endif
1827
1828	s = splbio();
1829loop:
1830	/*
1831	 * Block if we are low on buffers.  The syncer is allowed more
1832	 * buffers in order to avoid a deadlock.
1833	 */
1834	if (curproc == updateproc && numfreebuffers == 0) {
1835		needsbuffer |= VFS_BIO_NEED_ANY;
1836		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1837		    slptimeo);
1838	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1839		waitfreebuffers(slpflag, slptimeo);
1840	}
1841
1842	if ((bp = gbincore(vp, blkno))) {
1843		/*
1844		 * Buffer is in-core
1845		 */
1846
1847		if (bp->b_flags & B_BUSY) {
1848			bp->b_flags |= B_WANTED;
1849			if (bp->b_usecount < BUF_MAXUSE)
1850				++bp->b_usecount;
1851
1852			if (!tsleep(bp,
1853				(PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1854				goto loop;
1855			}
1856
1857			splx(s);
1858			return (struct buf *) NULL;
1859		}
1860
1861		/*
1862		 * Busy the buffer.  B_CACHE is cleared if the buffer is
1863		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
1864		 * and for a VMIO buffer B_CACHE is adjusted according to the
1865		 * backing VM cache.
1866		 */
1867		bp->b_flags |= B_BUSY;
1868		if (bp->b_flags & B_INVAL)
1869			bp->b_flags &= ~B_CACHE;
1870		else if ((bp->b_flags & (B_VMIO|B_INVAL)) == 0)
1871			bp->b_flags |= B_CACHE;
1872		bremfree(bp);
1873
1874		/*
1875		 * check for size inconsistencies for the non-VMIO case.
1876		 */
1877
1878		if (bp->b_bcount != size) {
1879			if ((bp->b_flags & B_VMIO) == 0 ||
1880			    (size > bp->b_kvasize)
1881			) {
1882				if (bp->b_flags & B_DELWRI) {
1883					bp->b_flags |= B_NOCACHE;
1884					VOP_BWRITE(bp);
1885				} else {
1886					if ((bp->b_flags & B_VMIO) &&
1887					   (LIST_FIRST(&bp->b_dep) == NULL)) {
1888						bp->b_flags |= B_RELBUF;
1889						brelse(bp);
1890					} else {
1891						bp->b_flags |= B_NOCACHE;
1892						VOP_BWRITE(bp);
1893					}
1894				}
1895				goto loop;
1896			}
1897		}
1898
1899		/*
1900		 * If the size is inconsistent in the VMIO case, we can resize
1901		 * the buffer.  This might lead to B_CACHE getting set or
1902		 * cleared.  If the size has not changed, B_CACHE remains
1903		 * unchanged from its previous state.
1904		 */
1905
1906		if (bp->b_bcount != size)
1907			allocbuf(bp, size);
1908
1909		KASSERT(bp->b_offset != NOOFFSET,
1910		    ("getblk: no buffer offset"));
1911
1912		/*
1913		 * A buffer with B_DELWRI set and B_CACHE clear must
1914		 * be committed before we can return the buffer in
1915		 * order to prevent the caller from issuing a read
1916		 * ( due to B_CACHE not being set ) and overwriting
1917		 * it.
1918		 *
1919		 * Most callers, including NFS and FFS, need this to
1920		 * operate properly either because they assume they
1921		 * can issue a read if B_CACHE is not set, or because
1922		 * ( for example ) an uncached B_DELWRI might loop due
1923		 * to softupdates re-dirtying the buffer.  In the latter
1924		 * case, B_CACHE is set after the first write completes,
1925		 * preventing further loops.
1926		 */
1927
1928		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1929			VOP_BWRITE(bp);
1930			goto loop;
1931		}
1932
1933		if (bp->b_usecount < BUF_MAXUSE)
1934			++bp->b_usecount;
1935		splx(s);
1936		bp->b_flags &= ~B_DONE;
1937	} else {
1938		/*
1939		 * Buffer is not in-core, create new buffer.  The buffer
1940		 * returned by getnewbuf() is marked B_BUSY.  Note that the
1941		 * returned buffer is also considered valid ( not marked
1942		 * B_INVAL ).
1943		 */
1944		int bsize, maxsize, vmio;
1945		off_t offset;
1946
1947		if (vp->v_type == VBLK)
1948			bsize = DEV_BSIZE;
1949		else if (vp->v_mountedhere)
1950			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1951		else if (vp->v_mount)
1952			bsize = vp->v_mount->mnt_stat.f_iosize;
1953		else
1954			bsize = size;
1955
1956		offset = (off_t)blkno * bsize;
1957		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1958		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1959		maxsize = imax(maxsize, bsize);
1960
1961		if ((bp = getnewbuf(vp, blkno,
1962			slpflag, slptimeo, size, maxsize)) == NULL) {
1963			if (slpflag || slptimeo) {
1964				splx(s);
1965				return NULL;
1966			}
1967			goto loop;
1968		}
1969
1970		/*
1971		 * This code is used to make sure that a buffer is not
1972		 * created while the getnewbuf routine is blocked.
1973		 * This can be a problem whether the vnode is locked or not.
1974		 * If the buffer is created out from under us, we have to
1975		 * throw away the one we just created.  There is no race
1976		 * window because we are safely running at splbio() from the
1977		 * point of the duplicate buffer creation through to here.
1978		 */
1979		if (gbincore(vp, blkno)) {
1980			bp->b_flags |= B_INVAL;
1981			brelse(bp);
1982			goto loop;
1983		}
1984
1985		/*
1986		 * Insert the buffer into the hash, so that it can
1987		 * be found by incore.
1988		 */
1989		bp->b_blkno = bp->b_lblkno = blkno;
1990		bp->b_offset = offset;
1991
1992		bgetvp(vp, bp);
1993		LIST_REMOVE(bp, b_hash);
1994		bh = BUFHASH(vp, blkno);
1995		LIST_INSERT_HEAD(bh, bp, b_hash);
1996
1997		/*
1998		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
1999		 * buffer size starts out as 0, B_CACHE will be set by
2000		 * allocbuf() for the VMIO case prior to it testing the
2001		 * backing store for validity.
2002		 */
2003
2004		if (vmio) {
2005			bp->b_flags |= B_VMIO;
2006#if defined(VFS_BIO_DEBUG)
2007			if (vp->v_type != VREG && vp->v_type != VBLK)
2008				printf("getblk: vmioing file type %d???\n", vp->v_type);
2009#endif
2010		} else {
2011			bp->b_flags &= ~B_VMIO;
2012		}
2013
2014		allocbuf(bp, size);
2015
2016		splx(s);
2017		bp->b_flags &= ~B_DONE;
2018	}
2019	return (bp);
2020}
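
/*
 * Illustrative sketch only, kept under #if 0 and not compiled: how a caller
 * typically consumes the B_CACHE result of getblk().  If B_CACHE is set the
 * contents are valid; otherwise the caller must initiate the read itself,
 * much as bread() does.  The helper name example_bread is hypothetical.
 */
#if 0
static int
example_bread(struct vnode *vp, daddr_t lbn, int size, struct buf **bpp)
{
	struct buf *bp;

	bp = getblk(vp, lbn, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* not valid in the cache -- read it from the backing store */
		bp->b_flags |= B_READ;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		*bpp = bp;
		return (biowait(bp));	/* caller brelse()s on error */
	}
	*bpp = bp;
	return (0);
}
#endif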
2021
2022/*
2023 * Get an empty, disassociated buffer of given size.  The buffer is initially
2024 * set to B_INVAL.
2025 */
2026struct buf *
2027geteblk(int size)
2028{
2029	struct buf *bp;
2030	int s;
2031
2032	s = splbio();
2033	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
2034	splx(s);
2035	allocbuf(bp, size);
2036	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2037	return (bp);
2038}
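
/*
 * Sketch, not compiled: geteblk() returns a B_BUSY buffer with no vnode
 * association, typically used as scratch space.  Because B_INVAL is set,
 * brelse() simply discards it.
 */
#if 0
	struct buf *ebp;

	ebp = geteblk(PAGE_SIZE);	/* scratch buffer of at least PAGE_SIZE */
	bzero(ebp->b_data, ebp->b_bcount);
	/* ... use ebp->b_data ... */
	brelse(ebp);			/* B_INVAL: the buffer is thrown away */
#endif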
2039
2040
2041/*
2042 * This routine constructs the buffer's memory from either anonymous system
2043 * memory (in the case of non-VMIO operations) or from an associated
2044 * VM object (in the case of VMIO operations).  This code is able to
2045 * resize a buffer up or down.
2046 *
2047 * Note that this code is tricky, and has many complications to resolve
2048 * deadlock or inconsistent data situations.  Tread lightly!!!
2049 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2050 * the caller.  Calling this code willy nilly can result in the loss of data.
2051 *
2052 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2053 * B_CACHE for the non-VMIO case.
2054 */
2055
2056int
2057allocbuf(struct buf *bp, int size)
2058{
2059	int newbsize, mbsize;
2060	int i;
2061
2062#if !defined(MAX_PERF)
2063	if (!(bp->b_flags & B_BUSY))
2064		panic("allocbuf: buffer not busy");
2065
2066	if (bp->b_kvasize < size)
2067		panic("allocbuf: buffer too small");
2068#endif
2069
2070	if ((bp->b_flags & B_VMIO) == 0) {
2071		caddr_t origbuf;
2072		int origbufsize;
2073		/*
2074		 * Just get anonymous memory from the kernel.  Don't
2075		 * mess with B_CACHE.
2076		 */
2077		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2078#if !defined(NO_B_MALLOC)
2079		if (bp->b_flags & B_MALLOC)
2080			newbsize = mbsize;
2081		else
2082#endif
2083			newbsize = round_page(size);
2084
2085		if (newbsize < bp->b_bufsize) {
2086#if !defined(NO_B_MALLOC)
2087			/*
2088			 * malloced buffers are not shrunk
2089			 */
2090			if (bp->b_flags & B_MALLOC) {
2091				if (newbsize) {
2092					bp->b_bcount = size;
2093				} else {
2094					free(bp->b_data, M_BIOBUF);
2095					bufspace -= bp->b_bufsize;
2096					bufmallocspace -= bp->b_bufsize;
2097					runningbufspace -= bp->b_bufsize;
2098					if (bp->b_bufsize)
2099						bufspacewakeup();
2100					bp->b_data = bp->b_kvabase;
2101					bp->b_bufsize = 0;
2102					bp->b_bcount = 0;
2103					bp->b_flags &= ~B_MALLOC;
2104				}
2105				return 1;
2106			}
2107#endif
2108			vm_hold_free_pages(
2109			    bp,
2110			    (vm_offset_t) bp->b_data + newbsize,
2111			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2112		} else if (newbsize > bp->b_bufsize) {
2113#if !defined(NO_B_MALLOC)
2114			/*
2115			 * We only use malloced memory on the first allocation,
2116			 * and revert to page-allocated memory when the buffer grows.
2117			 */
2118			if ( (bufmallocspace < maxbufmallocspace) &&
2119				(bp->b_bufsize == 0) &&
2120				(mbsize <= PAGE_SIZE/2)) {
2121
2122				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2123				bp->b_bufsize = mbsize;
2124				bp->b_bcount = size;
2125				bp->b_flags |= B_MALLOC;
2126				bufspace += mbsize;
2127				bufmallocspace += mbsize;
2128				runningbufspace += bp->b_bufsize;
2129				return 1;
2130			}
2131#endif
2132			origbuf = NULL;
2133			origbufsize = 0;
2134#if !defined(NO_B_MALLOC)
2135			/*
2136			 * If the buffer is growing on its other-than-first allocation,
2137			 * then we revert to the page-allocation scheme.
2138			 */
2139			if (bp->b_flags & B_MALLOC) {
2140				origbuf = bp->b_data;
2141				origbufsize = bp->b_bufsize;
2142				bp->b_data = bp->b_kvabase;
2143				bufspace -= bp->b_bufsize;
2144				bufmallocspace -= bp->b_bufsize;
2145				runningbufspace -= bp->b_bufsize;
2146				if (bp->b_bufsize)
2147					bufspacewakeup();
2148				bp->b_bufsize = 0;
2149				bp->b_flags &= ~B_MALLOC;
2150				newbsize = round_page(newbsize);
2151			}
2152#endif
2153			vm_hold_load_pages(
2154			    bp,
2155			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2156			    (vm_offset_t) bp->b_data + newbsize);
2157#if !defined(NO_B_MALLOC)
2158			if (origbuf) {
2159				bcopy(origbuf, bp->b_data, origbufsize);
2160				free(origbuf, M_BIOBUF);
2161			}
2162#endif
2163		}
2164	} else {
2165		vm_page_t m;
2166		int desiredpages;
2167
2168		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2169		desiredpages = (size == 0) ? 0 :
2170			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2171
2172#if !defined(NO_B_MALLOC)
2173		if (bp->b_flags & B_MALLOC)
2174			panic("allocbuf: VMIO buffer can't be malloced");
2175#endif
2176		/*
2177		 * Set B_CACHE initially if buffer is 0 length or will become
2178		 * 0-length.
2179		 */
2180		if (size == 0 || bp->b_bufsize == 0)
2181			bp->b_flags |= B_CACHE;
2182
2183		if (newbsize < bp->b_bufsize) {
2184			/*
2185			 * DEV_BSIZE aligned new buffer size is less than the
2186			 * DEV_BSIZE aligned existing buffer size.  Figure out
2187			 * if we have to remove any pages.
2188			 */
2189			if (desiredpages < bp->b_npages) {
2190				for (i = desiredpages; i < bp->b_npages; i++) {
2191					/*
2192					 * the page is not freed here -- it
2193					 * is the responsibility of
2194					 * vnode_pager_setsize
2195					 */
2196					m = bp->b_pages[i];
2197					KASSERT(m != bogus_page,
2198					    ("allocbuf: bogus page found"));
2199					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2200						;
2201
2202					bp->b_pages[i] = NULL;
2203					vm_page_unwire(m, 0);
2204				}
2205				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2206				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2207				bp->b_npages = desiredpages;
2208			}
2209		} else if (size > bp->b_bcount) {
2210			/*
2211			 * We are growing the buffer, possibly in a
2212			 * byte-granular fashion.
2213			 */
2214			struct vnode *vp;
2215			vm_object_t obj;
2216			vm_offset_t toff;
2217			vm_offset_t tinc;
2218
2219			/*
2220			 * Step 1, bring in the VM pages from the object,
2221			 * allocating them if necessary.  We must clear
2222			 * B_CACHE if these pages are not valid for the
2223			 * range covered by the buffer.
2224			 */
2225
2226			vp = bp->b_vp;
2227			obj = vp->v_object;
2228
2229			while (bp->b_npages < desiredpages) {
2230				vm_page_t m;
2231				vm_pindex_t pi;
2232
2233				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2234				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2235					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2236					if (m == NULL) {
2237						VM_WAIT;
2238						vm_pageout_deficit += desiredpages - bp->b_npages;
2239					} else {
2240						vm_page_wire(m);
2241						vm_page_wakeup(m);
2242						bp->b_flags &= ~B_CACHE;
2243						bp->b_pages[bp->b_npages] = m;
2244						++bp->b_npages;
2245					}
2246					continue;
2247				}
2248
2249				/*
2250				 * We found a page.  If we have to sleep on it,
2251				 * retry because it might have gotten freed out
2252				 * from under us.
2253				 *
2254				 * We can only test PG_BUSY here.  Blocking on
2255				 * m->busy might lead to a deadlock:
2256				 *
2257				 *  vm_fault->getpages->cluster_read->allocbuf
2258				 *
2259				 */
2260
2261				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2262					continue;
2263
2264				/*
2265				 * We have a good page.  Should we wakeup the
2266				 * page daemon?
2267				 */
2268				if ((curproc != pageproc) &&
2269				    ((m->queue - m->pc) == PQ_CACHE) &&
2270				    ((cnt.v_free_count + cnt.v_cache_count) <
2271					(cnt.v_free_min + cnt.v_cache_min))
2272				) {
2273					pagedaemon_wakeup();
2274				}
2275				vm_page_flag_clear(m, PG_ZERO);
2276				vm_page_wire(m);
2277				bp->b_pages[bp->b_npages] = m;
2278				++bp->b_npages;
2279			}
2280
2281			/*
2282			 * Step 2.  We've loaded the pages into the buffer, so
2283			 * we have to figure out if we can still have B_CACHE
2284			 * set.  Note that B_CACHE is set according to the
2285			 * byte-granular range ( bcount and size ), not the
2286			 * aligned range ( newbsize ).
2287			 *
2288			 * The VM test is against m->valid, which is DEV_BSIZE
2289			 * aligned.  Needless to say, the validity of the data
2290			 * needs to also be DEV_BSIZE aligned.  Note that this
2291			 * fails with NFS if the server or some other client
2292			 * extends the file's EOF.  If our buffer is resized,
2293			 * B_CACHE may remain set! XXX
2294			 */
2295
2296			toff = bp->b_bcount;
2297			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2298
2299			while ((bp->b_flags & B_CACHE) && toff < size) {
2300				vm_pindex_t pi;
2301
2302				if (tinc > (size - toff))
2303					tinc = size - toff;
2304
2305				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2306				    PAGE_SHIFT;
2307
2308				vfs_buf_test_cache(
2309				    bp,
2310				    bp->b_offset,
2311				    toff,
2312				    tinc,
2313				    bp->b_pages[pi]
2314				);
2315				toff += tinc;
2316				tinc = PAGE_SIZE;
2317			}
2318
2319			/*
2320			 * Step 3, fixup the KVM pmap.  Remember that
2321			 * bp->b_data is relative to bp->b_offset, but
2322			 * bp->b_offset may be offset into the first page.
2323			 */
2324
2325			bp->b_data = (caddr_t)
2326			    trunc_page((vm_offset_t)bp->b_data);
2327			pmap_qenter(
2328			    (vm_offset_t)bp->b_data,
2329			    bp->b_pages,
2330			    bp->b_npages
2331			);
2332			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2333			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2334		}
2335	}
2336	if (bp->b_flags & B_VMIO)
2337		vmiospace += (newbsize - bp->b_bufsize);
2338	bufspace += (newbsize - bp->b_bufsize);
2339	runningbufspace += (newbsize - bp->b_bufsize);
2340	if (newbsize < bp->b_bufsize)
2341		bufspacewakeup();
2342	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2343	bp->b_bcount = size;		/* requested buffer size	*/
2344	return 1;
2345}
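
/*
 * Sketch, not compiled: resizing a busy buffer with allocbuf().  For a VMIO
 * buffer B_CACHE may be set or cleared as a side effect, so the caller must
 * re-test it before trusting the contents.  "bp" and "newsize" are assumed
 * to exist in the caller.
 */
#if 0
	allocbuf(bp, newsize);			/* bp must be B_BUSY */
	if ((bp->b_flags & B_CACHE) == 0) {
		/* backing store not fully valid -- a read is still required */
	}
#endif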
2346
2347/*
2348 *	biowait:
2349 *
2350 *	Wait for buffer I/O completion, returning error status.  The buffer
2351 *	is left B_BUSY|B_DONE on return.  B_EINTR is converted into an EINTR
2352 *	error and cleared.
2353 */
2354int
2355biowait(register struct buf * bp)
2356{
2357	int s;
2358
2359	s = splbio();
2360	while ((bp->b_flags & B_DONE) == 0)
2361#if defined(NO_SCHEDULE_MODS)
2362		tsleep(bp, PRIBIO, "biowait", 0);
2363#else
2364		if (bp->b_flags & B_READ)
2365			tsleep(bp, PRIBIO, "biord", 0);
2366		else
2367			tsleep(bp, PRIBIO, "biowr", 0);
2368#endif
2369	splx(s);
2370	if (bp->b_flags & B_EINTR) {
2371		bp->b_flags &= ~B_EINTR;
2372		return (EINTR);
2373	}
2374	if (bp->b_flags & B_ERROR) {
2375		return (bp->b_error ? bp->b_error : EIO);
2376	} else {
2377		return (0);
2378	}
2379}
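
/*
 * Sketch, not compiled: a synchronous write followed by biowait(), roughly
 * the core of bwrite().  "bp" is assumed to be a busy buffer owned by the
 * caller.
 */
#if 0
	int error;

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp->b_vp, bp);
	error = biowait(bp);		/* 0, EINTR, or the I/O error */
	brelse(bp);
#endif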
2380
2381/*
2382 *	biodone:
2383 *
2384 *	Finish I/O on a buffer, optionally calling a completion function.
2385 *	This is usually called from an interrupt so process blocking is
2386 *	not allowed.
2387 *
2388 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2389 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2390 *	assuming B_INVAL is clear.
2391 *
2392 *	For the VMIO case, we set B_CACHE if the op was a read and no
2393 *	read error occurred, or if the op was a write.  B_CACHE is never
2394 *	set if the buffer is invalid or otherwise uncacheable.
2395 *
2396 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2397 *	initiator to leave B_INVAL set to brelse the buffer out of existence
2398 *	in the biodone routine.
2399 */
2400void
2401biodone(register struct buf * bp)
2402{
2403	int s;
2404
2405	s = splbio();
2406
2407	KASSERT((bp->b_flags & B_BUSY), ("biodone: bp %p not busy", bp));
2408	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2409
2410	bp->b_flags |= B_DONE;
2411
2412	if (bp->b_flags & B_FREEBUF) {
2413		brelse(bp);
2414		splx(s);
2415		return;
2416	}
2417
2418	if ((bp->b_flags & B_READ) == 0) {
2419		vwakeup(bp);
2420	}
2421
2422	/* call optional completion function if requested */
2423	if (bp->b_flags & B_CALL) {
2424		bp->b_flags &= ~B_CALL;
2425		(*bp->b_iodone) (bp);
2426		splx(s);
2427		return;
2428	}
2429	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2430		(*bioops.io_complete)(bp);
2431
2432	if (bp->b_flags & B_VMIO) {
2433		int i, resid;
2434		vm_ooffset_t foff;
2435		vm_page_t m;
2436		vm_object_t obj;
2437		int iosize;
2438		struct vnode *vp = bp->b_vp;
2439
2440		obj = vp->v_object;
2441
2442#if defined(VFS_BIO_DEBUG)
2443		if (vp->v_usecount == 0) {
2444			panic("biodone: zero vnode ref count");
2445		}
2446
2447		if (vp->v_object == NULL) {
2448			panic("biodone: missing VM object");
2449		}
2450
2451		if ((vp->v_flag & VOBJBUF) == 0) {
2452			panic("biodone: vnode is not setup for merged cache");
2453		}
2454#endif
2455
2456		foff = bp->b_offset;
2457		KASSERT(bp->b_offset != NOOFFSET,
2458		    ("biodone: no buffer offset"));
2459
2460#if !defined(MAX_PERF)
2461		if (!obj) {
2462			panic("biodone: no object");
2463		}
2464#endif
2465#if defined(VFS_BIO_DEBUG)
2466		if (obj->paging_in_progress < bp->b_npages) {
2467			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2468			    obj->paging_in_progress, bp->b_npages);
2469		}
2470#endif
2471
2472		/*
2473		 * Set B_CACHE if the op was a normal read and no error
2474		 * occurred.  B_CACHE is set for writes in the b*write()
2475		 * routines.
2476		 */
2477		iosize = bp->b_bcount;
2478		if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2479			bp->b_flags |= B_CACHE;
2480		}
2481
2482		for (i = 0; i < bp->b_npages; i++) {
2483			int bogusflag = 0;
2484			m = bp->b_pages[i];
2485			if (m == bogus_page) {
2486				bogusflag = 1;
2487				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2488				if (!m) {
2489#if defined(VFS_BIO_DEBUG)
2490					printf("biodone: page disappeared\n");
2491#endif
2492					vm_object_pip_subtract(obj, 1);
2493					bp->b_flags &= ~B_CACHE;
2494					continue;
2495				}
2496				bp->b_pages[i] = m;
2497				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2498			}
2499#if defined(VFS_BIO_DEBUG)
2500			if (OFF_TO_IDX(foff) != m->pindex) {
2501				printf(
2502"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2503				    (unsigned long)foff, m->pindex);
2504			}
2505#endif
2506			resid = IDX_TO_OFF(m->pindex + 1) - foff;
2507			if (resid > iosize)
2508				resid = iosize;
2509
2510			/*
2511			 * In the write case, the valid and clean bits are
2512			 * already changed correctly ( see bdwrite() ), so we
2513			 * only need to do this here in the read case.
2514			 */
2515			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2516				vfs_page_set_valid(bp, foff, i, m);
2517			}
2518			vm_page_flag_clear(m, PG_ZERO);
2519
2520			/*
2521			 * when debugging new filesystems or buffer I/O methods, this
2522			 * is the most common error that pops up.  If you see this, you
2523			 * have not set the page busy flag correctly!!!
2524			 */
2525			if (m->busy == 0) {
2526#if !defined(MAX_PERF)
2527				printf("biodone: page busy < 0, "
2528				    "pindex: %d, foff: 0x(%x,%x), "
2529				    "resid: %d, index: %d\n",
2530				    (int) m->pindex, (int)(foff >> 32),
2531						(int) foff & 0xffffffff, resid, i);
2532#endif
2533				if (vp->v_type != VBLK)
2534#if !defined(MAX_PERF)
2535					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2536					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2537					    (int) bp->b_lblkno,
2538					    bp->b_flags, bp->b_npages);
2539				else
2540					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2541					    (int) bp->b_lblkno,
2542					    bp->b_flags, bp->b_npages);
2543				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2544				    m->valid, m->dirty, m->wire_count);
2545#endif
2546				panic("biodone: page busy < 0\n");
2547			}
2548			vm_page_io_finish(m);
2549			vm_object_pip_subtract(obj, 1);
2550			foff += resid;
2551			iosize -= resid;
2552		}
2553		if (obj)
2554			vm_object_pip_wakeupn(obj, 0);
2555	}
2556	/*
2557	 * For asynchronous completions, release the buffer now. The brelse
2558	 * checks for B_WANTED and will do the wakeup there if necessary - so
2559	 * no need to do a wakeup here in the async case.
2560	 */
2561
2562	if (bp->b_flags & B_ASYNC) {
2563		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2564			brelse(bp);
2565		else
2566			bqrelse(bp);
2567	} else {
2568		bp->b_flags &= ~B_WANTED;
2569		wakeup(bp);
2570	}
2571	splx(s);
2572}
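
/*
 * Sketch, not compiled: an initiator that wants private completion handling
 * sets B_CALL and b_iodone before starting the I/O; biodone() then invokes
 * the handler instead of performing the normal release or wakeup.  The
 * handler name example_iodone is hypothetical.
 */
#if 0
static void
example_iodone(struct buf *bp)
{
	/* called from biodone(), usually at interrupt time */
	if (bp->b_flags & B_ERROR)
		printf("example_iodone: error %d\n", bp->b_error);
	brelse(bp);
}

	/* ... when issuing the I/O ... */
	bp->b_flags |= B_CALL | B_ASYNC;
	bp->b_iodone = example_iodone;
	VOP_STRATEGY(bp->b_vp, bp);
#endif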
2573
2574#if 0	/* not with kirks code */
2575static int vfs_update_interval = 30;
2576
2577static void
2578vfs_update()
2579{
2580	while (1) {
2581		tsleep(&vfs_update_wakeup, PUSER, "update",
2582		    hz * vfs_update_interval);
2583		vfs_update_wakeup = 0;
2584		sync(curproc, NULL);
2585	}
2586}
2587
2588static int
2589sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
2590{
2591	int error = sysctl_handle_int(oidp,
2592		oidp->oid_arg1, oidp->oid_arg2, req);
2593	if (!error)
2594		wakeup(&vfs_update_wakeup);
2595	return error;
2596}
2597
2598SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
2599	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
2600
2601#endif
2602
2603
2604/*
2605 * This routine is called in lieu of iodone in the case of
2606 * incomplete I/O.  This keeps the busy status for pages
2607 * consistent.
2608 */
2609void
2610vfs_unbusy_pages(struct buf * bp)
2611{
2612	int i;
2613
2614	if (bp->b_flags & B_VMIO) {
2615		struct vnode *vp = bp->b_vp;
2616		vm_object_t obj = vp->v_object;
2617
2618		for (i = 0; i < bp->b_npages; i++) {
2619			vm_page_t m = bp->b_pages[i];
2620
2621			if (m == bogus_page) {
2622				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2623#if !defined(MAX_PERF)
2624				if (!m) {
2625					panic("vfs_unbusy_pages: page missing\n");
2626				}
2627#endif
2628				bp->b_pages[i] = m;
2629				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2630			}
2631			vm_object_pip_subtract(obj, 1);
2632			vm_page_flag_clear(m, PG_ZERO);
2633			vm_page_io_finish(m);
2634		}
2635		vm_object_pip_wakeupn(obj, 0);
2636	}
2637}
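
/*
 * Sketch, not compiled: one plausible use of vfs_unbusy_pages().  If, after
 * vfs_busy_pages() has run, the I/O will not be initiated after all, the
 * page busying and paging_in_progress accounting are undone here instead of
 * by biodone().  "cannot_start_io" is a hypothetical condition.
 */
#if 0
	vfs_busy_pages(bp, 0);
	if (cannot_start_io) {
		vfs_unbusy_pages(bp);
		bp->b_flags |= B_ERROR | B_INVAL;
		brelse(bp);
	} else {
		VOP_STRATEGY(bp->b_vp, bp);
	}
#endif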
2638
2639/*
2640 * vfs_page_set_valid:
2641 *
2642 *	Set the valid bits in a page based on the supplied offset.   The
2643 *	range is restricted to the buffer's size.
2644 *
2645 *	For NFS, the range is additionally restricted to b_validoff/end.
2646 *	validoff/end must be DEV_BSIZE chunky or the end must be at the
2647 *	file EOF.  If a dirty range exists, set the page's dirty bits
2648 *	inclusively.
2649 *
2650 *	This routine is typically called after a read completes.
2651 */
2652static void
2653vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2654{
2655	vm_ooffset_t soff, eoff;
2656
2657	/*
2658	 * Start and end offsets in buffer.  eoff - soff may not cross a
2659	 * page boundary or cross the end of the buffer.  The end of the
2660	 * buffer, in this case, is our file EOF, not the allocation size
2661	 * of the buffer.
2662	 */
2663	soff = off;
2664	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2665	if (eoff > bp->b_offset + bp->b_bcount)
2666		eoff = bp->b_offset + bp->b_bcount;
2667
2668	/*
2669	 * Set valid range.  This is typically the entire buffer and thus the
2670	 * entire page.
2671	 */
2672	if (eoff > soff) {
2673		vm_page_set_validclean(
2674		    m,
2675		   (vm_offset_t) (soff & PAGE_MASK),
2676		   (vm_offset_t) (eoff - soff)
2677		);
2678	}
2679}
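
/*
 * Worked example of the offset arithmetic above, assuming PAGE_SIZE is
 * 0x1000: for off = 0x11200, eoff = (0x11200 + 0x1000) & ~0xfff = 0x12000
 * (clipped to b_offset + b_bcount if the buffer ends sooner), so the call
 * becomes vm_page_set_validclean(m, 0x200, 0xe00) -- the tail of that page
 * starting at the in-page offset of "off".
 */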
2680
2681/*
2682 * This routine is called before a device strategy routine.
2683 * It is used to tell the VM system that paging I/O is in
2684 * progress, and treat the pages associated with the buffer
2685 * almost as being PG_BUSY.  Also the object paging_in_progress
2686 * flag is handled to make sure that the object doesn't become
2687 * inconsistent.
2688 *
2689 * Since I/O has not been initiated yet, certain buffer flags
2690 * such as B_ERROR or B_INVAL may be in an inconsistent state
2691 * and should be ignored.
2692 */
2693void
2694vfs_busy_pages(struct buf * bp, int clear_modify)
2695{
2696	int i, bogus;
2697
2698	if (bp->b_flags & B_VMIO) {
2699		struct vnode *vp = bp->b_vp;
2700		vm_object_t obj = vp->v_object;
2701		vm_ooffset_t foff;
2702
2703		foff = bp->b_offset;
2704		KASSERT(bp->b_offset != NOOFFSET,
2705		    ("vfs_busy_pages: no buffer offset"));
2706		vfs_setdirty(bp);
2707
2708retry:
2709		for (i = 0; i < bp->b_npages; i++) {
2710			vm_page_t m = bp->b_pages[i];
2711			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2712				goto retry;
2713		}
2714
2715		bogus = 0;
2716		for (i = 0; i < bp->b_npages; i++) {
2717			vm_page_t m = bp->b_pages[i];
2718
2719			vm_page_flag_clear(m, PG_ZERO);
2720			if ((bp->b_flags & B_CLUSTER) == 0) {
2721				vm_object_pip_add(obj, 1);
2722				vm_page_io_start(m);
2723			}
2724
2725			/*
2726			 * When readying a buffer for a read ( i.e.
2727			 * clear_modify == 0 ), it is important to do
2728			 * bogus_page replacement for valid pages in
2729			 * partially instantiated buffers.  Partially
2730			 * instantiated buffers can, in turn, occur when
2731			 * reconstituting a buffer from its VM backing store
2732			 * base.  We only have to do this if B_CACHE is
2733			 * clear ( which causes the I/O to occur in the
2734			 * first place ).  The replacement prevents the read
2735			 * I/O from overwriting potentially dirty VM-backed
2736			 * pages.  XXX bogus page replacement is, uh, bogus.
2737			 * It may not work properly with small-block devices.
2738			 * We need to find a better way.
2739			 */
2740
2741			vm_page_protect(m, VM_PROT_NONE);
2742			if (clear_modify)
2743				vfs_page_set_valid(bp, foff, i, m);
2744			else if (m->valid == VM_PAGE_BITS_ALL &&
2745				(bp->b_flags & B_CACHE) == 0) {
2746				bp->b_pages[i] = bogus_page;
2747				bogus++;
2748			}
2749			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2750		}
2751		if (bogus)
2752			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2753	}
2754}
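
/*
 * Sketch, not compiled: clear_modify distinguishes the two callers.  Writes
 * pass 1 so the pages are marked valid and clean up front; reads pass 0 so
 * that already-valid pages in a partially instantiated buffer receive the
 * bogus_page substitution described above.
 */
#if 0
	/* write path */
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp->b_vp, bp);

	/* read path */
	bp->b_flags |= B_READ;
	vfs_busy_pages(bp, 0);
	VOP_STRATEGY(bp->b_vp, bp);
#endif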
2755
2756/*
2757 * Tell the VM system that the pages associated with this buffer
2758 * are clean.  This is used for delayed writes where the data is
2759 * going to go to disk eventually without additional VM intervention.
2760 *
2761 * Note that while we only really need to clean through to b_bcount, we
2762 * just go ahead and clean through to b_bufsize.
2763 */
2764static void
2765vfs_clean_pages(struct buf * bp)
2766{
2767	int i;
2768
2769	if (bp->b_flags & B_VMIO) {
2770		vm_ooffset_t foff;
2771
2772		foff = bp->b_offset;
2773		KASSERT(bp->b_offset != NOOFFSET,
2774		    ("vfs_clean_pages: no buffer offset"));
2775		for (i = 0; i < bp->b_npages; i++) {
2776			vm_page_t m = bp->b_pages[i];
2777			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2778			vm_ooffset_t eoff = noff;
2779
2780			if (eoff > bp->b_offset + bp->b_bufsize)
2781				eoff = bp->b_offset + bp->b_bufsize;
2782			vfs_page_set_valid(bp, foff, i, m);
2783			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2784			foff = noff;
2785		}
2786	}
2787}
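
/*
 * Sketch, not compiled: the tail of a bdwrite()-style delayed write, which
 * is the typical caller of vfs_clean_pages().
 */
#if 0
	bdirty(bp);		/* buffer becomes B_DELWRI */
	vfs_setdirty(bp);	/* record the VM-dirty byte range */
	vfs_clean_pages(bp);	/* pages now look clean to the pageout daemon */
	bqrelse(bp);
#endif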
2788
2789/*
2790 *	vfs_bio_set_validclean:
2791 *
2792 *	Set the range within the buffer to valid and clean.  The range is
2793 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
2794 *	itself may be offset from the beginning of the first page.
2795 */
2796
2797void
2798vfs_bio_set_validclean(struct buf *bp, int base, int size)
2799{
2800	if (bp->b_flags & B_VMIO) {
2801		int i;
2802		int n;
2803
2804		/*
2805		 * Fixup base to be relative to beginning of first page.
2806		 * Set initial n to be the maximum number of bytes in the
2807		 * first page that can be validated.
2808		 */
2809
2810		base += (bp->b_offset & PAGE_MASK);
2811		n = PAGE_SIZE - (base & PAGE_MASK);
2812
2813		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2814			vm_page_t m = bp->b_pages[i];
2815
2816			if (n > size)
2817				n = size;
2818
2819			vm_page_set_validclean(m, base & PAGE_MASK, n);
2820			base += n;
2821			size -= n;
2822			n = PAGE_SIZE;
2823		}
2824	}
2825}
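
/*
 * Sketch, not compiled: mark the first 512 bytes of the buffer valid and
 * clean, e.g. after the caller has filled in that range by hand.
 */
#if 0
	vfs_bio_set_validclean(bp, 0, 512);
#endif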
2826
2827/*
2828 *	vfs_bio_clrbuf:
2829 *
2830 *	clear a buffer.  This routine essentially fakes an I/O, so we need
2831 *	to clear B_ERROR and B_INVAL.
2832 *
2833 *	Note that while we only theoretically need to clear through b_bcount,
2834 *	we go ahead and clear through b_bufsize.
2835 */
2836
2837void
2838vfs_bio_clrbuf(struct buf *bp) {
2839	int i, mask = 0;
2840	caddr_t sa, ea;
2841	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2842		bp->b_flags &= ~(B_INVAL|B_ERROR);
2843		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2844		    (bp->b_offset & PAGE_MASK) == 0) {
2845			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2846			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2847			    ((bp->b_pages[0]->valid & mask) != mask)) {
2848				bzero(bp->b_data, bp->b_bufsize);
2849			}
2850			bp->b_pages[0]->valid |= mask;
2851			bp->b_resid = 0;
2852			return;
2853		}
2854		ea = sa = bp->b_data;
2855		for(i=0;i<bp->b_npages;i++,sa=ea) {
2856			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
2857			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2858			ea = (caddr_t)ulmin((u_long)ea,
2859				(u_long)bp->b_data + bp->b_bufsize);
2860			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2861			if ((bp->b_pages[i]->valid & mask) == mask)
2862				continue;
2863			if ((bp->b_pages[i]->valid & mask) == 0) {
2864				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2865					bzero(sa, ea - sa);
2866				}
2867			} else {
2868				for (; sa < ea; sa += DEV_BSIZE, j++) {
2869					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2870						(bp->b_pages[i]->valid & (1<<j)) == 0)
2871						bzero(sa, DEV_BSIZE);
2872				}
2873			}
2874			bp->b_pages[i]->valid |= mask;
2875			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2876		}
2877		bp->b_resid = 0;
2878	} else {
2879		clrbuf(bp);
2880	}
2881}
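
/*
 * Sketch, not compiled: typical block-allocation use of vfs_bio_clrbuf().
 * A newly allocated block has no useful on-disk contents, so instead of
 * reading it the caller fakes the I/O and zeros whatever is not already
 * valid.
 */
#if 0
	bp = getblk(vp, lbn, size, 0, 0);	/* newly allocated block */
	vfs_bio_clrbuf(bp);			/* fake the read, zero invalid parts */
#endif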
2882
2883/*
2884 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
2885 * a buffer's address space.  The pages are anonymous and are
2886 * not associated with a file object.
2887 */
2888void
2889vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2890{
2891	vm_offset_t pg;
2892	vm_page_t p;
2893	int index;
2894
2895	to = round_page(to);
2896	from = round_page(from);
2897	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2898
2899	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2900
2901tryagain:
2902
2903		p = vm_page_alloc(kernel_object,
2904			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2905		    VM_ALLOC_NORMAL);
2906		if (!p) {
2907			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
2908			VM_WAIT;
2909			goto tryagain;
2910		}
2911		vm_page_wire(p);
2912		p->valid = VM_PAGE_BITS_ALL;
2913		vm_page_flag_clear(p, PG_ZERO);
2914		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2915		bp->b_pages[index] = p;
2916		vm_page_wakeup(p);
2917	}
2918	bp->b_npages = index;
2919}
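
/*
 * Sketch, not compiled: how allocbuf() above grows a non-VMIO buffer's
 * anonymous backing -- wire in pages covering the new portion of the
 * buffer's KVA.
 */
#if 0
	vm_hold_load_pages(bp,
	    (vm_offset_t) bp->b_data + bp->b_bufsize,
	    (vm_offset_t) bp->b_data + newbsize);
#endif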
2920
2921void
2922vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2923{
2924	vm_offset_t pg;
2925	vm_page_t p;
2926	int index, newnpages;
2927
2928	from = round_page(from);
2929	to = round_page(to);
2930	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2931
2932	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2933		p = bp->b_pages[index];
2934		if (p && (index < bp->b_npages)) {
2935#if !defined(MAX_PERF)
2936			if (p->busy) {
2937				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2938					bp->b_blkno, bp->b_lblkno);
2939			}
2940#endif
2941			bp->b_pages[index] = NULL;
2942			pmap_kremove(pg);
2943			vm_page_busy(p);
2944			vm_page_unwire(p, 0);
2945			vm_page_free(p);
2946		}
2947	}
2948	bp->b_npages = newnpages;
2949}
2950
2951
2952#include "opt_ddb.h"
2953#ifdef DDB
2954#include <ddb/ddb.h>
2955
2956DB_SHOW_COMMAND(buffer, db_show_buffer)
2957{
2958	/* get args */
2959	struct buf *bp = (struct buf *)addr;
2960
2961	if (!have_addr) {
2962		db_printf("usage: show buffer <addr>\n");
2963		return;
2964	}
2965
2966	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
2967		  (u_int)bp->b_flags, PRINT_BUF_FLAGS);
2968	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
2969		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
2970		  "b_blkno = %d, b_pblkno = %d\n",
2971		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
2972		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
2973	if (bp->b_npages) {
2974		int i;
2975		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
2976		for (i = 0; i < bp->b_npages; i++) {
2977			vm_page_t m;
2978			m = bp->b_pages[i];
2979			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
2980			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
2981			if ((i + 1) < bp->b_npages)
2982				db_printf(",");
2983		}
2984		db_printf("\n");
2985	}
2986}
2987#endif /* DDB */
2988