vfs_bio.c revision 9558
1/*
2 * Copyright (c) 1994 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 *    John S. Dyson.
16 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17 *    is allowed if this notation is included.
18 * 5. Modifications may be freely made to this file if the above conditions
19 *    are met.
20 *
21 * $Id: vfs_bio.c,v 1.48 1995/07/15 16:01:46 davidg Exp $
22 */
23
24/*
25 * this file contains a new buffer I/O scheme implementing a coherent
26 * VM object and buffer cache scheme.  Pains have been taken to make
27 * sure that the performance degradation associated with schemes such
28 * as this is not realized.
29 *
30 * Author:  John S. Dyson
31 * Significant help during the development and debugging phases
32 * had been provided by David Greenman, also of the FreeBSD core team.
33 */
34
35#define VMIO
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/kernel.h>
39#include <sys/proc.h>
40#include <sys/vnode.h>
41#include <vm/vm.h>
42#include <vm/vm_kern.h>
43#include <vm/vm_pageout.h>
44#include <vm/vm_page.h>
45#include <vm/vm_object.h>
46#include <sys/buf.h>
47#include <sys/mount.h>
48#include <sys/malloc.h>
49#include <sys/resourcevar.h>
51
52#include <miscfs/specfs/specdev.h>
53
54struct buf *buf;		/* buffer header pool */
55int nbuf;			/* number of buffer headers calculated
56				 * elsewhere */
57struct swqueue bswlist;
58
59void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
60void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
61void vfs_clean_pages(struct buf * bp);
62static void vfs_setdirty(struct buf *bp);
63
64int needsbuffer;
65
66/*
67 * Internal update daemon, process 3
68 *	The variable vfs_update_wakeup allows for internal syncs.
69 */
70int vfs_update_wakeup;
71
72
73/*
74 * buffers base kva
75 */
76caddr_t buffers_kva;
77
78/*
79 * bogus page -- for I/O to/from partially complete buffers
80 * this is a temporary solution to the problem, but it is not
81 * really that bad.  It would be better to split the buffer
82 * for input in the case of buffers that are already partially in memory,
83 * but the code is intricate enough already.
84 */
85vm_page_t bogus_page;
86vm_offset_t bogus_offset;
87
88int bufspace, maxbufspace;
89
90/*
91 * advisory minimum for size of LRU queue or VMIO queue
92 */
93int minbuf;
94
95/*
96 * Initialize buffer headers and related structures.
97 */
98void
99bufinit()
100{
101	struct buf *bp;
102	int i;
103
104	TAILQ_INIT(&bswlist);
105	LIST_INIT(&invalhash);
106
107	/* first, make a null hash table */
108	for (i = 0; i < BUFHSZ; i++)
109		LIST_INIT(&bufhashtbl[i]);
110
111	/* next, make a null set of free lists */
112	for (i = 0; i < BUFFER_QUEUES; i++)
113		TAILQ_INIT(&bufqueues[i]);
114
115	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
116	/* finally, initialize each buffer header and stick on empty q */
117	for (i = 0; i < nbuf; i++) {
118		bp = &buf[i];
119		bzero(bp, sizeof *bp);
120		bp->b_flags = B_INVAL;	/* we're just an empty header */
121		bp->b_dev = NODEV;
122		bp->b_rcred = NOCRED;
123		bp->b_wcred = NOCRED;
124		bp->b_qindex = QUEUE_EMPTY;
125		bp->b_vnbufs.le_next = NOLIST;
126		bp->b_data = buffers_kva + i * MAXBSIZE;
127		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
128		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
129	}
130/*
131 * maxbufspace is currently calculated to support all filesystem blocks
132 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
133 * cache is still the same as it would be for 8K filesystems.  This
134 * keeps the size of the buffer cache "in check" for big block filesystems.
135 */
136	minbuf = nbuf / 3;
137	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
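	/*
	 * Illustrative arithmetic, assuming values not defined in this
	 * file: with nbuf = 1024 buffer headers and PAGE_SIZE = 4096,
	 * maxbufspace = 2 * (1024 + 8) * 4096 = 8454144 bytes, a bit
	 * over 8MB, i.e. roughly two pages worth of mapped data per
	 * buffer header, and minbuf = 1024 / 3 = 341 headers.
	 */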
138
139	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
140	bogus_page = vm_page_alloc(kernel_object,
141			bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
142
143}
144
145/*
146 * remove the buffer from the appropriate free list
147 */
148void
149bremfree(struct buf * bp)
150{
151	int s = splbio();
152
153	if (bp->b_qindex != QUEUE_NONE) {
154		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
155		bp->b_qindex = QUEUE_NONE;
156	} else {
157		panic("bremfree: removing a buffer when not on a queue");
158	}
159	splx(s);
160}
161
162/*
163 * Get a buffer with the specified data.  Look in the cache first.
164 */
165int
166bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
167    struct buf ** bpp)
168{
169	struct buf *bp;
170
171	bp = getblk(vp, blkno, size, 0, 0);
172	*bpp = bp;
173
174	/* if not found in cache, do some I/O */
175	if ((bp->b_flags & B_CACHE) == 0) {
176		if (curproc != NULL)
177			curproc->p_stats->p_ru.ru_inblock++;
178		bp->b_flags |= B_READ;
179		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
180		if (bp->b_rcred == NOCRED) {
181			if (cred != NOCRED)
182				crhold(cred);
183			bp->b_rcred = cred;
184		}
185		vfs_busy_pages(bp, 0);
186		VOP_STRATEGY(bp);
187		return (biowait(bp));
188	}
189	return (0);
190}
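/*
 * An illustrative sketch of a typical bread() caller, assuming the
 * placeholder names vp, lbn and bsize.  The buffer comes back busy and
 * must be released by the caller, including on error:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(use bp->b_data for up to bsize bytes)
 *	brelse(bp);
 */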
191
192/*
193 * Operates like bread, but also starts asynchronous I/O on
194 * read-ahead blocks.
195 */
196int
197breadn(struct vnode * vp, daddr_t blkno, int size,
198    daddr_t * rablkno, int *rabsize,
199    int cnt, struct ucred * cred, struct buf ** bpp)
200{
201	struct buf *bp, *rabp;
202	int i;
203	int rv = 0, readwait = 0;
204
205	*bpp = bp = getblk(vp, blkno, size, 0, 0);
206
207	/* if not found in cache, do some I/O */
208	if ((bp->b_flags & B_CACHE) == 0) {
209		if (curproc != NULL)
210			curproc->p_stats->p_ru.ru_inblock++;
211		bp->b_flags |= B_READ;
212		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
213		if (bp->b_rcred == NOCRED) {
214			if (cred != NOCRED)
215				crhold(cred);
216			bp->b_rcred = cred;
217		}
218		vfs_busy_pages(bp, 0);
219		VOP_STRATEGY(bp);
220		++readwait;
221	}
222	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
223		if (inmem(vp, *rablkno))
224			continue;
225		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
226
227		if ((rabp->b_flags & B_CACHE) == 0) {
228			if (curproc != NULL)
229				curproc->p_stats->p_ru.ru_inblock++;
230			rabp->b_flags |= B_READ | B_ASYNC;
231			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
232			if (rabp->b_rcred == NOCRED) {
233				if (cred != NOCRED)
234					crhold(cred);
235				rabp->b_rcred = cred;
236			}
237			vfs_busy_pages(rabp, 0);
238			VOP_STRATEGY(rabp);
239		} else {
240			brelse(rabp);
241		}
242	}
243
244	if (readwait) {
245		rv = biowait(bp);
246	}
247	return (rv);
248}
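/*
 * An illustrative sketch of breadn() with a single read-ahead block,
 * assuming the same placeholder names as above.  Block lbn is read
 * synchronously while lbn + 1 is started asynchronously:
 *
 *	daddr_t ra_blkno = lbn + 1;
 *	int ra_size = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &ra_blkno, &ra_size, 1, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 */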
249
250/*
251 * Write, release buffer on completion.  (Done by iodone
252 * if async.)
253 */
254int
255bwrite(struct buf * bp)
256{
257	int oldflags = bp->b_flags;
258
259	if (bp->b_flags & B_INVAL) {
260		brelse(bp);
261		return (0);
262	}
263	if (!(bp->b_flags & B_BUSY))
264		panic("bwrite: buffer is not busy???");
265
266	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
267	bp->b_flags |= B_WRITEINPROG;
268
269	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
270		reassignbuf(bp, bp->b_vp);
271	}
272
273	bp->b_vp->v_numoutput++;
274	vfs_busy_pages(bp, 1);
275	if (curproc != NULL)
276		curproc->p_stats->p_ru.ru_oublock++;
277	VOP_STRATEGY(bp);
278
279	if ((oldflags & B_ASYNC) == 0) {
280		int rtval = biowait(bp);
281
282		if (oldflags & B_DELWRI) {
283			reassignbuf(bp, bp->b_vp);
284		}
285		brelse(bp);
286		return (rtval);
287	}
288	return (0);
289}
290
291int
292vn_bwrite(struct vop_bwrite_args * ap)
294{
295	return (bwrite(ap->a_bp));
296}
297
298/*
299 * Delayed write. (Buffer is marked dirty).
300 */
301void
302bdwrite(struct buf * bp)
303{
304
305	if ((bp->b_flags & B_BUSY) == 0) {
306		panic("bdwrite: buffer is not busy");
307	}
308	if (bp->b_flags & B_INVAL) {
309		brelse(bp);
310		return;
311	}
312	if (bp->b_flags & B_TAPE) {
313		bawrite(bp);
314		return;
315	}
316	bp->b_flags &= ~(B_READ|B_RELBUF);
317	if ((bp->b_flags & B_DELWRI) == 0) {
318		bp->b_flags |= B_DONE | B_DELWRI;
319		reassignbuf(bp, bp->b_vp);
320	}
321
322	/*
323	 * This bmap keeps the system from needing to do the bmap later,
324	 * perhaps when the system is attempting to do a sync.  Since the
325	 * indirect block -- or whatever other data structure the filesystem
326	 * needs -- is likely still in memory now, it is a good time to do
327	 * the bmap.  Note also that if the pageout daemon is requesting a
328	 * sync, there might not be enough memory to do the bmap then, so
329	 * doing it here is important.
330	 */
331	if( bp->b_lblkno == bp->b_blkno) {
332		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
333	}
334
335	/*
336	 * Set the *dirty* buffer range based upon the VM system dirty pages.
337	 */
338	vfs_setdirty(bp);
339
340	/*
341	 * We need to do this here to satisfy the vnode_pager and the
342	 * pageout daemon, so that it thinks that the pages have been
343	 * "cleaned".  Note that since the pages are in a delayed write
344	 * buffer -- the VFS layer "will" see that the pages get written
345	 * out on the next sync, or perhaps the cluster will be completed.
346	 */
347	vfs_clean_pages(bp);
348	brelse(bp);
349	return;
350}
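/*
 * An illustrative sketch of the delayed write pattern served by
 * bdwrite(), assuming the placeholder names vp, lbn and bsize.  Only a
 * small portion of the block is modified, so the write is deferred and
 * may later be combined with neighboring writes:
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(modify a few bytes within bp->b_data)
 *	bdwrite(bp);
 */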
351
352/*
353 * Asynchronous write.
354 * Start output on a buffer, but do not wait for it to complete.
355 * The buffer is released when the output completes.
356 */
357void
358bawrite(struct buf * bp)
359{
360	bp->b_flags |= B_ASYNC;
361	(void) VOP_BWRITE(bp);
362}
363
364/*
365 * Release a buffer.
366 */
367void
368brelse(struct buf * bp)
369{
370	int s;
371
372	if (bp->b_flags & B_CLUSTER) {
373		relpbuf(bp);
374		return;
375	}
376	/* anyone need a "free" block? */
377	s = splbio();
378
379	if (needsbuffer) {
380		needsbuffer = 0;
381		wakeup((caddr_t) &needsbuffer);
382	}
383
384	/* anyone need this block? */
385	if (bp->b_flags & B_WANTED) {
386		bp->b_flags &= ~(B_WANTED | B_AGE);
387		wakeup((caddr_t) bp);
388	} else if (bp->b_flags & B_VMIO) {
389		bp->b_flags &= ~B_WANTED;
390		wakeup((caddr_t) bp);
391	}
392	if (bp->b_flags & B_LOCKED)
393		bp->b_flags &= ~B_ERROR;
394
395	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
396	    (bp->b_bufsize <= 0)) {
397		bp->b_flags |= B_INVAL;
398		bp->b_flags &= ~(B_DELWRI | B_CACHE);
399		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
400			brelvp(bp);
401	}
402
403	/*
404	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
405	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
406	 * but the VM object is kept around.  The B_NOCACHE flag is used to
407	 * invalidate the pages in the VM object.
408	 */
409	if (bp->b_flags & B_VMIO) {
410		vm_offset_t foff;
411		vm_object_t obj;
412		int i, resid;
413		vm_page_t m;
414		int iototal = bp->b_bufsize;
415
416		foff = 0;
417		obj = 0;
418		if (bp->b_npages) {
419			if (bp->b_vp && bp->b_vp->v_mount) {
420				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
421			} else {
422				/*
423				 * vnode pointer has been ripped away --
424				 * probably file gone...
425				 */
426				foff = bp->b_pages[0]->offset;
427			}
428		}
429		for (i = 0; i < bp->b_npages; i++) {
430			m = bp->b_pages[i];
431			if (m == bogus_page) {
432				m = vm_page_lookup(obj, foff);
433				if (!m) {
434					panic("brelse: page missing\n");
435				}
436				bp->b_pages[i] = m;
437				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
438			}
439			resid = (m->offset + PAGE_SIZE) - foff;
440			if (resid > iototal)
441				resid = iototal;
442			if (resid > 0) {
443				/*
444				 * Don't invalidate the page if the local machine has already
445				 * modified it.  This is the lesser of two evils, and should
446				 * be fixed.
447				 */
448				if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
449					vm_page_test_dirty(m);
450					if (m->dirty == 0) {
451						vm_page_set_invalid(m, foff, resid);
452						if (m->valid == 0)
453							vm_page_protect(m, VM_PROT_NONE);
454					}
455				}
456			}
457			foff += resid;
458			iototal -= resid;
459		}
460
461		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
462			for(i=0;i<bp->b_npages;i++) {
463				m = bp->b_pages[i];
464				--m->bmapped;
465				if (m->bmapped == 0) {
466					if (m->flags & PG_WANTED) {
467						wakeup((caddr_t) m);
468						m->flags &= ~PG_WANTED;
469					}
470					vm_page_test_dirty(m);
471					if ((m->dirty & m->valid) == 0 &&
472						(m->flags & PG_REFERENCED) == 0 &&
473							!pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
474						vm_page_cache(m);
475					} else if ((m->flags & PG_ACTIVE) == 0) {
476						vm_page_activate(m);
477						m->act_count = 0;
478					}
479				}
480			}
481			bufspace -= bp->b_bufsize;
482			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
483			bp->b_npages = 0;
484			bp->b_bufsize = 0;
485			bp->b_flags &= ~B_VMIO;
486			if (bp->b_vp)
487				brelvp(bp);
488		}
489	}
490	if (bp->b_qindex != QUEUE_NONE)
491		panic("brelse: free buffer onto another queue???");
492
493	/* enqueue */
494	/* buffers with no memory */
495	if (bp->b_bufsize == 0) {
496		bp->b_qindex = QUEUE_EMPTY;
497		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
498		LIST_REMOVE(bp, b_hash);
499		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
500		bp->b_dev = NODEV;
501		/* buffers with junk contents */
502	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
503		bp->b_qindex = QUEUE_AGE;
504		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
505		LIST_REMOVE(bp, b_hash);
506		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
507		bp->b_dev = NODEV;
508		/* buffers that are locked */
509	} else if (bp->b_flags & B_LOCKED) {
510		bp->b_qindex = QUEUE_LOCKED;
511		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
512		/* buffers with stale but valid contents */
513	} else if (bp->b_flags & B_AGE) {
514		bp->b_qindex = QUEUE_AGE;
515		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
516		/* buffers with valid and quite potentially reusable contents */
517	} else {
518		bp->b_qindex = QUEUE_LRU;
519		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
520	}
521
522	/* unlock */
523	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
524	splx(s);
525}
526
527/*
528 * this routine implements clustered async writes for
529 * clearing out B_DELWRI buffers...  This is much better
530 * than the old way of writing only one buffer at a time.
531 */
532void
533vfs_bio_awrite(struct buf * bp)
534{
535	int i;
536	daddr_t lblkno = bp->b_lblkno;
537	struct vnode *vp = bp->b_vp;
538	int s;
539	int ncl;
540	struct buf *bpa;
541
542	s = splbio();
543	if( vp->v_mount && (vp->v_flag & VVMIO) &&
544	    	(bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
545		int size = vp->v_mount->mnt_stat.f_iosize;
546
547		for (i = 1; i < MAXPHYS / size; i++) {
548			if ((bpa = incore(vp, lblkno + i)) &&
549			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == (B_DELWRI | B_CLUSTEROK)) &&
550			    (bpa->b_bufsize == size)) {
551				if ((bpa->b_blkno == bpa->b_lblkno) ||
552				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
553					break;
554			} else {
555				break;
556			}
557		}
558		ncl = i;
559		/*
560		 * this is a possible cluster write
561		 */
562		if (ncl != 1) {
563			bremfree(bp);
564			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
565			splx(s);
566			return;
567		}
568	}
569	/*
570	 * default (old) behavior, writing out only one block
571	 */
572	bremfree(bp);
573	bp->b_flags |= B_BUSY | B_ASYNC;
574	(void) VOP_BWRITE(bp);
575	splx(s);
576}
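/*
 * A worked example of the cluster scan above, assuming sizes not
 * defined in this file.  With an 8192 byte filesystem block size and
 * MAXPHYS = 65536, the loop examines logical blocks lblkno + 1 through
 * lblkno + 7.  If each of them is an in-core, clusterable, non-busy
 * delayed write whose disk address follows bp contiguously, ncl
 * becomes 8 and cluster_wbuild() can issue one 64K transfer in place
 * of eight 8K transfers.
 */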
577
578
579/*
580 * Find a buffer header which is available for use.
581 */
582static struct buf *
583getnewbuf(int slpflag, int slptimeo, int doingvmio)
584{
585	struct buf *bp;
586	int s;
587	int firstbp = 1;
588
589	s = splbio();
590start:
591	if (bufspace >= maxbufspace)
592		goto trytofreespace;
593
594	/* can we constitute a new buffer? */
595	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
596		if (bp->b_qindex != QUEUE_EMPTY)
597			panic("getnewbuf: inconsistent EMPTY queue");
598		bremfree(bp);
599		goto fillbuf;
600	}
601trytofreespace:
602	/*
603	 * We keep the file I/O from hogging metadata I/O.
604	 * This is desirable because file data is cached in the
605	 * VM/Buffer cache even if a buffer is freed.
606	 */
607	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
608		if (bp->b_qindex != QUEUE_AGE)
609			panic("getnewbuf: inconsistent AGE queue");
610	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
611		if (bp->b_qindex != QUEUE_LRU)
612			panic("getnewbuf: inconsistent LRU queue");
613	}
614	if (!bp) {
615		/* wait for a free buffer of any kind */
616		needsbuffer = 1;
617		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
618		splx(s);
619		return (0);
620	}
621
622	/* if we are a delayed write, convert to an async write */
623	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
624		vfs_bio_awrite(bp);
625		if (!slpflag && !slptimeo) {
626			splx(s);
627			return (0);
628		}
629		goto start;
630	}
631
632	if (bp->b_flags & B_WANTED) {
633		bp->b_flags &= ~B_WANTED;
634		wakeup((caddr_t) bp);
635	}
636	bremfree(bp);
637
638	if (bp->b_flags & B_VMIO) {
639		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
640		brelse(bp);
641		bremfree(bp);
642	}
643
644	if (bp->b_vp)
645		brelvp(bp);
646
647	/* we are not free, nor do we contain interesting data */
648	if (bp->b_rcred != NOCRED)
649		crfree(bp->b_rcred);
650	if (bp->b_wcred != NOCRED)
651		crfree(bp->b_wcred);
652fillbuf:
653	bp->b_flags |= B_BUSY;
654	LIST_REMOVE(bp, b_hash);
655	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
656	splx(s);
657	if (bp->b_bufsize) {
658		allocbuf(bp, 0);
659	}
660	bp->b_flags = B_BUSY;
661	bp->b_dev = NODEV;
662	bp->b_vp = NULL;
663	bp->b_blkno = bp->b_lblkno = 0;
664	bp->b_iodone = 0;
665	bp->b_error = 0;
666	bp->b_resid = 0;
667	bp->b_bcount = 0;
668	bp->b_npages = 0;
669	bp->b_wcred = bp->b_rcred = NOCRED;
670	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
671	bp->b_dirtyoff = bp->b_dirtyend = 0;
672	bp->b_validoff = bp->b_validend = 0;
673	if (bufspace >= maxbufspace) {
674		s = splbio();
675		bp->b_flags |= B_INVAL;
676		brelse(bp);
677		goto trytofreespace;
678	}
679	return (bp);
680}
681
682/*
683 * Check to see if a block is currently memory resident.
684 */
685struct buf *
686incore(struct vnode * vp, daddr_t blkno)
687{
688	struct buf *bp;
689	struct bufhashhdr *bh;
690
691	int s = splbio();
692
693	bh = BUFHASH(vp, blkno);
694	bp = bh->lh_first;
695
696	/* Search hash chain */
697	while (bp) {
698		/* hit */
699		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
700		    (bp->b_flags & B_INVAL) == 0) {
701			splx(s);
702			return (bp);
703		}
704		bp = bp->b_hash.le_next;
705	}
706	splx(s);
707
708	return (0);
709}
710
711/*
712 * Returns true if no I/O is needed to access the
713 * associated VM object.  This is like incore except
714 * it also hunts around in the VM system for the data.
715 */
716
717int
718inmem(struct vnode * vp, daddr_t blkno)
719{
720	vm_object_t obj;
721	vm_offset_t off, toff, tinc;
722	vm_page_t m;
723
724	if (incore(vp, blkno))
725		return 1;
726	if (vp->v_mount == 0)
727		return 0;
728	if ((vp->v_object == 0) || (vp->v_flag & VVMIO) == 0)
729		return 0;
730
731	obj = vp->v_object;
732	tinc = PAGE_SIZE;
733	if (tinc > vp->v_mount->mnt_stat.f_iosize)
734		tinc = vp->v_mount->mnt_stat.f_iosize;
735	off = blkno * vp->v_mount->mnt_stat.f_iosize;
736
737	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
738		int mask;
739
740		m = vm_page_lookup(obj, trunc_page(toff + off));
741		if (!m)
742			return 0;
743		if (vm_page_is_valid(m, toff + off, tinc) == 0)
744			return 0;
745	}
746	return 1;
747}
748
749/*
750 * Set the dirty range for the buffer.  This matters for NFS: if the
751 * file is mapped and pages have been written to through the mapping,
752 * the buffer has to reflect that.  We want the entire range of the
753 * buffer to be marked dirty if any of the pages have been written to,
754 * for consistency with the b_validoff and b_validend set in the NFS
755 * write code and used by the NFS read code.
756 */
758static void
759vfs_setdirty(struct buf *bp) {
760	int i;
761	vm_object_t object;
762	vm_offset_t boffset, offset;
763	/*
764	 * We qualify the scan for modified pages on whether the
765	 * object has been flushed yet.  The OBJ_WRITEABLE flag
766	 * is not cleared simply by protecting pages off.
767	 */
768	if ((bp->b_flags & B_VMIO) &&
769		((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
770		/*
771		 * test the pages to see if they have been modified directly
772		 * by users through the VM system.
773		 */
774		for (i = 0; i < bp->b_npages; i++)
775			vm_page_test_dirty(bp->b_pages[i]);
776
777		/*
778		 * scan forwards for the first page modified
779		 */
780		for (i = 0; i < bp->b_npages; i++) {
781			if (bp->b_pages[i]->dirty) {
782				break;
783			}
784		}
785		boffset = i * PAGE_SIZE;
786		if (boffset < bp->b_dirtyoff) {
787			bp->b_dirtyoff = boffset;
788		}
789
790		/*
791		 * scan backwards for the last page modified
792		 */
793		for (i = bp->b_npages - 1; i >= 0; --i) {
794			if (bp->b_pages[i]->dirty) {
795				break;
796			}
797		}
798		boffset = (i + 1) * PAGE_SIZE;
799		offset = boffset + bp->b_pages[0]->offset;
800		if (offset >= object->size) {
801			boffset = object->size - bp->b_pages[0]->offset;
802		}
803		if (bp->b_dirtyend < boffset) {
804			bp->b_dirtyend = boffset;
805		}
806	}
807}
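/*
 * A worked example, assuming PAGE_SIZE = 4096: for an 8K VMIO buffer
 * backed by two pages where only the second page tests dirty, the
 * forward scan stops at index 1, giving boffset = 4096, and the
 * backward scan also stops at index 1, giving boffset = 8192.  So
 * b_dirtyoff is lowered to 4096 only if it was previously larger, and
 * b_dirtyend is raised to 8192 if it was previously smaller; the dirty
 * range only ever widens here.
 */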
808
809/*
810 * Get a block given a specified block and offset into a file/device.
811 */
812struct buf *
813getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
814{
815	struct buf *bp;
816	int s;
817	struct bufhashhdr *bh;
818	vm_offset_t off;
819	int nleft;
820
821	s = splbio();
822loop:
823	if (bp = incore(vp, blkno)) {
824		if (bp->b_flags & B_BUSY) {
825			bp->b_flags |= B_WANTED;
826			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
827				goto loop;
828
829			splx(s);
830			return (struct buf *) NULL;
831		}
832		bp->b_flags |= B_BUSY | B_CACHE;
833		bremfree(bp);
834		/*
835		 * check for size inconsistencies
836		 */
837		if (bp->b_bcount != size) {
838			allocbuf(bp, size);
839		}
840		splx(s);
841		return (bp);
842	} else {
843		vm_object_t obj;
844		int doingvmio;
845
846		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
847			doingvmio = 1;
848		} else {
849			doingvmio = 0;
850		}
851		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
852			if (slpflag || slptimeo)
853				return NULL;
854			goto loop;
855		}
856
857		/*
858		 * This code is used to make sure that a buffer is not
859		 * created while the getnewbuf routine is blocked.
860		 * Normally the vnode is locked so this isn't a problem.
861		 * VBLK type I/O requests, however, don't lock the vnode.
862		 * VOP_ISLOCKED would be much better but is also much
863		 * slower.
864		 */
865		if ((vp->v_type == VBLK) && incore(vp, blkno)) {
866			bp->b_flags |= B_INVAL;
867			brelse(bp);
868			goto loop;
869		}
870
871		/*
872		 * Insert the buffer into the hash, so that it can
873		 * be found by incore.
874		 */
875		bp->b_blkno = bp->b_lblkno = blkno;
876		bgetvp(vp, bp);
877		LIST_REMOVE(bp, b_hash);
878		bh = BUFHASH(vp, blkno);
879		LIST_INSERT_HEAD(bh, bp, b_hash);
880
881		if (doingvmio) {
882			bp->b_flags |= (B_VMIO | B_CACHE);
883#if defined(VFS_BIO_DEBUG)
884			if (vp->v_type != VREG)
885				printf("getblk: vmioing file type %d???\n", vp->v_type);
886#endif
887		} else {
888			bp->b_flags &= ~B_VMIO;
889		}
890		splx(s);
891
892		allocbuf(bp, size);
893		return (bp);
894	}
895}
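/*
 * An illustrative sketch of a getblk() caller that will overwrite the
 * whole block and therefore does not need to read it first, assuming
 * the placeholder names vp, lbn and bsize:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	(fill bp->b_data completely; no bread() is needed because
 *	 every byte is being overwritten)
 *	bwrite(bp);
 */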
896
897/*
898 * Get an empty, disassociated buffer of given size.
899 */
900struct buf *
901geteblk(int size)
902{
903	struct buf *bp;
904
905	while ((bp = getnewbuf(0, 0, 0)) == 0);
906	allocbuf(bp, size);
907	bp->b_flags |= B_INVAL;
908	return (bp);
909}
910
911/*
912 * This code constitutes the buffer memory from either anonymous system
913 * memory (in the case of non-VMIO operations) or from an associated
914 * VM object (in the case of VMIO operations).
915 *
916 * Note that this code is tricky, and has many complications to resolve
917 * deadlock or inconsistent data situations.  Tread lightly!!!
918 *
919 * Modify the length of a buffer's underlying buffer storage without
920 * destroying information (unless, of course the buffer is shrinking).
921 */
922int
923allocbuf(struct buf * bp, int size)
924{
925
926	int s;
927	int newbsize, mbsize;
928	int i;
929
930	if ((bp->b_flags & B_VMIO) == 0) {
931		/*
932		 * Just get anonymous memory from the kernel
933		 */
934		mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
935		newbsize = round_page(size);
936
937		if (newbsize < bp->b_bufsize) {
938			vm_hold_free_pages(
939			    bp,
940			    (vm_offset_t) bp->b_data + newbsize,
941			    (vm_offset_t) bp->b_data + bp->b_bufsize);
942		} else if (newbsize > bp->b_bufsize) {
943			vm_hold_load_pages(
944			    bp,
945			    (vm_offset_t) bp->b_data + bp->b_bufsize,
946			    (vm_offset_t) bp->b_data + newbsize);
947		}
948	} else {
949		vm_page_t m;
950		int desiredpages;
951
952		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
953		desiredpages = round_page(newbsize) / PAGE_SIZE;
954
955		if (newbsize < bp->b_bufsize) {
956			if (desiredpages < bp->b_npages) {
957				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
958				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
959				for (i = desiredpages; i < bp->b_npages; i++) {
960					m = bp->b_pages[i];
961					s = splhigh();
962					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
963						m->flags |= PG_WANTED;
964						tsleep(m, PVM, "biodep", 0);
965					}
966					splx(s);
967
968					if (m->bmapped == 0) {
969						printf("allocbuf: bmapped is zero for page %d\n", i);
970						panic("allocbuf: error");
971					}
972					--m->bmapped;
973					if (m->bmapped == 0) {
974						vm_page_protect(m, VM_PROT_NONE);
975						vm_page_free(m);
976					}
977					bp->b_pages[i] = NULL;
978				}
979				bp->b_npages = desiredpages;
980			}
981		} else if (newbsize > bp->b_bufsize) {
982			vm_object_t obj;
983			vm_offset_t tinc, off, toff, objoff;
984			int pageindex, curbpnpages;
985			struct vnode *vp;
986			int bsize;
987
988			vp = bp->b_vp;
989			bsize = vp->v_mount->mnt_stat.f_iosize;
990
991			if (bp->b_npages < desiredpages) {
992				obj = vp->v_object;
993				tinc = PAGE_SIZE;
994				if (tinc > bsize)
995					tinc = bsize;
996				off = bp->b_lblkno * bsize;
997				curbpnpages = bp->b_npages;
998		doretry:
999				bp->b_flags |= B_CACHE;
1000				for (toff = 0; toff < newbsize; toff += tinc) {
1001					int mask;
1002					int bytesinpage;
1003
1004					pageindex = toff / PAGE_SIZE;
1005					objoff = trunc_page(toff + off);
1006					if (pageindex < curbpnpages) {
1007						int pb;
1008
1009						m = bp->b_pages[pageindex];
1010						if (m->offset != objoff)
1011							panic("allocbuf: page changed offset??!!!?");
1012						bytesinpage = tinc;
1013						if (tinc > (newbsize - toff))
1014							bytesinpage = newbsize - toff;
1015						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1016							bp->b_flags &= ~B_CACHE;
1017						}
1018						if ((m->flags & PG_ACTIVE) == 0) {
1019							vm_page_activate(m);
1020							m->act_count = 0;
1021						}
1022						continue;
1023					}
1024					m = vm_page_lookup(obj, objoff);
1025					if (!m) {
1026						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1027						if (!m) {
1028							int j;
1029
1030							for (j = bp->b_npages; j < pageindex; j++) {
1031								PAGE_WAKEUP(bp->b_pages[j]);
1032							}
1033							VM_WAIT;
1034							curbpnpages = bp->b_npages;
1035							goto doretry;
1036						}
1037						vm_page_activate(m);
1038						m->act_count = 0;
1039						m->valid = 0;
1040					} else if (m->flags & PG_BUSY) {
1041						int j;
1042
1043						for (j = bp->b_npages; j < pageindex; j++) {
1044							PAGE_WAKEUP(bp->b_pages[j]);
1045						}
1046
1047						s = splbio();
1048						m->flags |= PG_WANTED;
1049						tsleep(m, PRIBIO, "pgtblk", 0);
1050						splx(s);
1051
1052						curbpnpages = bp->b_npages;
1053						goto doretry;
1054					} else {
1055						int pb;
1056						if ((curproc != pageproc) &&
1057							(m->flags & PG_CACHE) &&
1058						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
1059							pagedaemon_wakeup();
1060						}
1061						bytesinpage = tinc;
1062						if (tinc > (newbsize - toff))
1063							bytesinpage = newbsize - toff;
1064						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1065							bp->b_flags &= ~B_CACHE;
1066						}
1067						if ((m->flags & PG_ACTIVE) == 0) {
1068							vm_page_activate(m);
1069							m->act_count = 0;
1070						}
1071						m->flags |= PG_BUSY;
1072					}
1073					bp->b_pages[pageindex] = m;
1074					curbpnpages = pageindex + 1;
1075				}
1076				if (bsize >= PAGE_SIZE) {
1077					for (i = bp->b_npages; i < curbpnpages; i++) {
1078						m = bp->b_pages[i];
1079						if (m->valid == 0) {
1080							bp->b_flags &= ~B_CACHE;
1081						}
1082						m->bmapped++;
1083						PAGE_WAKEUP(m);
1084					}
1085				} else {
1086					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
1087						bp->b_flags &= ~B_CACHE;
1088					bp->b_pages[0]->bmapped++;
1089					PAGE_WAKEUP(bp->b_pages[0]);
1090				}
1091				bp->b_npages = curbpnpages;
1092				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
1093				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
1094				bp->b_data += off % PAGE_SIZE;
1095			}
1096		}
1097	}
1098	bufspace += (newbsize - bp->b_bufsize);
1099	bp->b_bufsize = newbsize;
1100	bp->b_bcount = size;
1101	return 1;
1102}
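/*
 * Illustrative arithmetic for the sizing above, assuming DEV_BSIZE =
 * 512 and PAGE_SIZE = 4096.  For a non-VMIO buffer, a request of size
 * 2500 gives newbsize = round_page(2500) = 4096, so one wired
 * anonymous page backs the buffer.  For a VMIO buffer, a request of
 * size 6144 gives newbsize = 6144 and desiredpages =
 * round_page(6144) / PAGE_SIZE = 2, so two pages of the vnode's VM
 * object are looked up or allocated and mapped at b_data.
 */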
1103
1104/*
1105 * Wait for buffer I/O completion, returning error status.
1106 */
1107int
1108biowait(register struct buf * bp)
1109{
1110	int s;
1111
1112	s = splbio();
1113	while ((bp->b_flags & B_DONE) == 0)
1114		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
1115	splx(s);
1116	if (bp->b_flags & B_EINTR) {
1117		bp->b_flags &= ~B_EINTR;
1118		return (EINTR);
1119	}
1120	if (bp->b_flags & B_ERROR) {
1121		return (bp->b_error ? bp->b_error : EIO);
1122	} else {
1123		return (0);
1124	}
1125}
1126
1127/*
1128 * Finish I/O on a buffer, calling an optional function.
1129 * This is usually called from interrupt level, so process blocking
1130 * is not *a good idea*.
1131 */
1132void
1133biodone(register struct buf * bp)
1134{
1135	int s;
1136
1137	s = splbio();
1138	if (bp->b_flags & B_DONE) {
1139		splx(s);
1140		printf("biodone: buffer already done\n");
1141		return;
1142	}
1143	bp->b_flags |= B_DONE;
1144
1145	if ((bp->b_flags & B_READ) == 0) {
1146		struct vnode *vp = bp->b_vp;
1147		vwakeup(bp);
1148	}
1149#ifdef BOUNCE_BUFFERS
1150	if (bp->b_flags & B_BOUNCE)
1151		vm_bounce_free(bp);
1152#endif
1153
1154	/* call optional completion function if requested */
1155	if (bp->b_flags & B_CALL) {
1156		bp->b_flags &= ~B_CALL;
1157		(*bp->b_iodone) (bp);
1158		splx(s);
1159		return;
1160	}
1161	if (bp->b_flags & B_VMIO) {
1162		int i, resid;
1163		vm_offset_t foff;
1164		vm_page_t m;
1165		vm_object_t obj;
1166		int iosize;
1167		struct vnode *vp = bp->b_vp;
1168
1169		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1170		obj = vp->v_object;
1171		if (!obj) {
			splx(s);
1172			return;
1173		}
1174#if defined(VFS_BIO_DEBUG)
1175		if (obj->paging_in_progress < bp->b_npages) {
1176			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1177			    obj->paging_in_progress, bp->b_npages);
1178		}
1179#endif
1180		iosize = bp->b_bufsize;
1181		for (i = 0; i < bp->b_npages; i++) {
1182			int bogusflag = 0;
1183			m = bp->b_pages[i];
1184			if (m == bogus_page) {
1185				bogusflag = 1;
1186				m = vm_page_lookup(obj, foff);
1187				if (!m) {
1188#if defined(VFS_BIO_DEBUG)
1189					printf("biodone: page disappeared\n");
1190#endif
1191					--obj->paging_in_progress;
1192					continue;
1193				}
1194				bp->b_pages[i] = m;
1195				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1196			}
1197#if defined(VFS_BIO_DEBUG)
1198			if (trunc_page(foff) != m->offset) {
1199				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
1200			}
1201#endif
1202			resid = (m->offset + PAGE_SIZE) - foff;
1203			if (resid > iosize)
1204				resid = iosize;
1205			/*
1206			 * In the write case, the valid and clean bits are
1207			 * already changed correctly, so we only need to do this
1208			 * here in the read case.
1209			 */
1210			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1211				vm_page_set_valid(m, foff & (PAGE_SIZE-1), resid);
1212				vm_page_set_clean(m, foff & (PAGE_SIZE-1), resid);
1213			}
1214
1215			/*
1216			 * When debugging new filesystems or buffer I/O methods, this
1217			 * is the most common error that pops up.  If you see this, you
1218			 * have not set the page busy flag correctly!!!
1219			 */
1220			if (m->busy == 0) {
1221				printf("biodone: page busy < 0, "
1222				    "off: %ld, foff: %ld, "
1223				    "resid: %d, index: %d\n",
1224				    m->offset, foff, resid, i);
1225				printf(" iosize: %ld, lblkno: %ld\n",
1226				    bp->b_vp->v_mount->mnt_stat.f_iosize,
1227				    bp->b_lblkno);
1228				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
1229				    m->valid, m->dirty, m->bmapped);
1230				panic("biodone: page busy < 0\n");
1231			}
1232			--m->busy;
1233			if( (m->busy == 0) && (m->flags & PG_WANTED))
1234				wakeup((caddr_t) m);
1235			--obj->paging_in_progress;
1236			foff += resid;
1237			iosize -= resid;
1238		}
1239		if (obj && obj->paging_in_progress == 0 &&
1240		    (obj->flags & OBJ_PIPWNT)) {
1241			obj->flags &= ~OBJ_PIPWNT;
1242			wakeup((caddr_t) obj);
1243		}
1244	}
1245	/*
1246	 * For asynchronous completions, release the buffer now. The brelse
1247	 * checks for B_WANTED and will do the wakeup there if necessary - so
1248	 * no need to do a wakeup here in the async case.
1249	 */
1250
1251	if (bp->b_flags & B_ASYNC) {
1252		brelse(bp);
1253	} else {
1254		bp->b_flags &= ~B_WANTED;
1255		wakeup((caddr_t) bp);
1256	}
1257	splx(s);
1258}
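/*
 * An illustrative sketch of the B_CALL completion hook handled above,
 * assuming a placeholder handler name.  A caller that wants to be
 * notified at interrupt time sets b_iodone before starting the I/O:
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = my_iodone_handler;
 *	VOP_STRATEGY(bp);
 *
 * Because biodone() returns immediately after the callback, the
 * handler owns the buffer; it typically inspects b_error and b_resid
 * and finishes by releasing the buffer, e.g. with brelse(bp).
 */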
1259
1260int
1261count_lock_queue()
1262{
1263	int count;
1264	struct buf *bp;
1265
1266	count = 0;
1267	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
1268	    bp != NULL;
1269	    bp = bp->b_freelist.tqe_next)
1270		count++;
1271	return (count);
1272}
1273
1274int vfs_update_interval = 30;
1275
1276void
1277vfs_update()
1278{
1279	(void) spl0();
1280	while (1) {
1281		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
1282		    hz * vfs_update_interval);
1283		vfs_update_wakeup = 0;
1284		sync(curproc, NULL, NULL);
1285	}
1286}
1287
1288/*
1289 * This routine is called in lieu of iodone in the case of
1290 * incomplete I/O.  This keeps the busy status for pages
1291 * consistent.
1292 */
1293void
1294vfs_unbusy_pages(struct buf * bp)
1295{
1296	int i;
1297
1298	if (bp->b_flags & B_VMIO) {
1299		struct vnode *vp = bp->b_vp;
1300		vm_object_t obj = vp->v_object;
1301		vm_offset_t foff;
1302
1303		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1304
1305		for (i = 0; i < bp->b_npages; i++) {
1306			vm_page_t m = bp->b_pages[i];
1307
1308			if (m == bogus_page) {
1309				m = vm_page_lookup(obj, foff);
1310				if (!m) {
1311					panic("vfs_unbusy_pages: page missing\n");
1312				}
1313				bp->b_pages[i] = m;
1314				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1315			}
1316			--obj->paging_in_progress;
1317			--m->busy;
1318			if( (m->busy == 0) && (m->flags & PG_WANTED))
1319				wakeup((caddr_t) m);
1320		}
1321		if (obj->paging_in_progress == 0 &&
1322		    (obj->flags & OBJ_PIPWNT)) {
1323			obj->flags &= ~OBJ_PIPWNT;
1324			wakeup((caddr_t) obj);
1325		}
1326	}
1327}
1328
1329/*
1330 * This routine is called before a device strategy routine.
1331 * It is used to tell the VM system that paging I/O is in
1332 * progress, and treat the pages associated with the buffer
1333 * almost as being PG_BUSY.  Also the object paging_in_progress
1334 * count is maintained to make sure that the object doesn't become
1335 * inconsistent.
1336 */
1337void
1338vfs_busy_pages(struct buf * bp, int clear_modify)
1339{
1340	int i;
1341
1342	if (bp->b_flags & B_VMIO) {
1343		vm_object_t obj = bp->b_vp->v_object;
1344		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1345		int iocount = bp->b_bufsize;
1346
1347		vfs_setdirty(bp);
1348		for (i = 0; i < bp->b_npages; i++) {
1349			vm_page_t m = bp->b_pages[i];
1350			int resid = (m->offset + PAGE_SIZE) - foff;
1351
1352			if (resid > iocount)
1353				resid = iocount;
1354			obj->paging_in_progress++;
1355			m->busy++;
1356			if (clear_modify) {
1357				vm_page_protect(m, VM_PROT_READ);
1358				vm_page_set_valid(m,
1359					foff & (PAGE_SIZE-1), resid);
1360				vm_page_set_clean(m,
1361					foff & (PAGE_SIZE-1), resid);
1362			} else if (bp->b_bcount >= PAGE_SIZE) {
1363				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
1364					bp->b_pages[i] = bogus_page;
1365					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1366				}
1367			}
1368			foff += resid;
1369			iocount -= resid;
1370		}
1371	}
1372}
1373
1374/*
1375 * Tell the VM system that the pages associated with this buffer
1376 * are clean.  This is used for delayed writes, where the data will
1377 * go to disk eventually without additional VM intervention.
1378 */
1379void
1380vfs_clean_pages(struct buf * bp)
1381{
1382	int i;
1383
1384	if (bp->b_flags & B_VMIO) {
1385		vm_offset_t foff =
1386			bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1387		int iocount = bp->b_bufsize;
1388
1389		for (i = 0; i < bp->b_npages; i++) {
1390			vm_page_t m = bp->b_pages[i];
1391			int resid = (m->offset + PAGE_SIZE) - foff;
1392
1393			if (resid > iocount)
1394				resid = iocount;
1395			if (resid > 0) {
1396				vm_page_set_valid(m,
1397					foff & (PAGE_SIZE-1), resid);
1398				vm_page_set_clean(m,
1399					foff & (PAGE_SIZE-1), resid);
1400			}
1401			foff += resid;
1402			iocount -= resid;
1403		}
1404	}
1405}
1406
1407void
1408vfs_bio_clrbuf(struct buf *bp) {
1409	int i;
1410	if( bp->b_flags & B_VMIO) {
1411		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
1412			int j;
1413			if( bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
1414				for(j=0; j < bp->b_bufsize / DEV_BSIZE;j++) {
1415					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
1416				}
1417			}
1418			bp->b_resid = 0;
1419			return;
1420		}
1421		for(i=0;i<bp->b_npages;i++) {
1422			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
1423				continue;
1424			if( bp->b_pages[i]->valid == 0) {
1425				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
1426			} else {
1427				int j;
1428				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
1429					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
1430						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
1431				}
1432			}
1433			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
1434		}
1435		bp->b_resid = 0;
1436	} else {
1437		clrbuf(bp);
1438	}
1439}
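/*
 * A worked example, assuming PAGE_SIZE = 4096 and DEV_BSIZE = 512:
 * each page then carries eight valid bits, one per 512 byte piece.
 * For a page whose valid mask is 0x0f, the loop above zeroes only the
 * upper four pieces, bytes 2048 through 4095, and the page is then
 * marked completely valid.
 */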
1440
1441/*
1442 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
1443 * a buffer's address space.  The pages are anonymous and are
1444 * not associated with a file object.
1445 */
1446void
1447vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1448{
1449	vm_offset_t pg;
1450	vm_page_t p;
1451	vm_offset_t from = round_page(froma);
1452	vm_offset_t to = round_page(toa);
1453
1454	for (pg = from; pg < to; pg += PAGE_SIZE) {
1455
1456tryagain:
1457
1458		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
1459		    VM_ALLOC_NORMAL);
1460		if (!p) {
1461			VM_WAIT;
1462			goto tryagain;
1463		}
1464		vm_page_wire(p);
1465		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
1466		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
1467		PAGE_WAKEUP(p);
1468		bp->b_npages++;
1469	}
1470}
1471
1472void
1473vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1474{
1475	vm_offset_t pg;
1476	vm_page_t p;
1477	vm_offset_t from = round_page(froma);
1478	vm_offset_t to = round_page(toa);
1479
1480	for (pg = from; pg < to; pg += PAGE_SIZE) {
1481		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
1482		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
1483		pmap_kremove(pg);
1484		vm_page_free(p);
1485		--bp->b_npages;
1486	}
1487}
1488