vfs_bio.c revision 9668
1/*
2 * Copyright (c) 1994 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 *    John S. Dyson.
16 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17 *    is allowed if this notation is included.
18 * 5. Modifications may be freely made to this file if the above conditions
19 *    are met.
20 *
21 * $Id: vfs_bio.c,v 1.50 1995/07/21 04:55:45 davidg Exp $
22 */
23
24/*
25 * this file contains a new buffer I/O scheme implementing a coherent
26 * VM object and buffer cache scheme.  Pains have been taken to make
27 * sure that the performance degradation associated with schemes such
28 * as this is not realized.
29 *
30 * Author:  John S. Dyson
31 * Significant help during the development and debugging phases
32 * was provided by David Greenman, also of the FreeBSD core team.
33 */
34
35#define VMIO
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/kernel.h>
39#include <sys/proc.h>
40#include <sys/vnode.h>
41#include <vm/vm.h>
42#include <vm/vm_kern.h>
43#include <vm/vm_pageout.h>
44#include <vm/vm_page.h>
45#include <vm/vm_object.h>
46#include <sys/buf.h>
47#include <sys/mount.h>
48#include <sys/malloc.h>
49#include <sys/resourcevar.h>
50#include <sys/proc.h>
51
52#include <miscfs/specfs/specdev.h>
53
54struct buf *buf;		/* buffer header pool */
55int nbuf;			/* number of buffer headers calculated
56				 * elsewhere */
57struct swqueue bswlist;
58
59void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
60void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
61void vfs_clean_pages(struct buf * bp);
62static void vfs_setdirty(struct buf *bp);
63
64int needsbuffer;
65
66/*
67 * Internal update daemon, process 3
68 *	The variable vfs_update_wakeup allows for internal syncs.
69 */
70int vfs_update_wakeup;
71
72
73/*
74 * buffers base kva
75 */
76caddr_t buffers_kva;
77
78/*
79 * bogus page -- for I/O to/from partially complete buffers
80 * This is a temporary solution to the problem, but it is not
81 * really that bad.  It would be better to split the buffer
82 * for input in the case of buffers partially already in memory,
83 * but the code is intricate enough already.
84 */
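/*
 * In practice (see vfs_busy_pages(), biodone() and brelse() below): when
 * a page in a buffer is already valid but the buffer as a whole is not
 * B_CACHE, vfs_busy_pages() temporarily maps bogus_page in its place so
 * the device read cannot clobber the valid data; the real page is looked
 * up again and re-entered into the buffer's KVA when the I/O completes.
 */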
85vm_page_t bogus_page;
86vm_offset_t bogus_offset;
87
88int bufspace, maxbufspace;
89
90/*
91 * advisory minimum for size of LRU queue or VMIO queue
92 */
93int minbuf;
94
95/*
96 * Initialize buffer headers and related structures.
97 */
98void
99bufinit()
100{
101	struct buf *bp;
102	int i;
103
104	TAILQ_INIT(&bswlist);
105	LIST_INIT(&invalhash);
106
107	/* first, make a null hash table */
108	for (i = 0; i < BUFHSZ; i++)
109		LIST_INIT(&bufhashtbl[i]);
110
111	/* next, make a null set of free lists */
112	for (i = 0; i < BUFFER_QUEUES; i++)
113		TAILQ_INIT(&bufqueues[i]);
114
115	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
116	/* finally, initialize each buffer header and stick on empty q */
117	for (i = 0; i < nbuf; i++) {
118		bp = &buf[i];
119		bzero(bp, sizeof *bp);
120		bp->b_flags = B_INVAL;	/* we're just an empty header */
121		bp->b_dev = NODEV;
122		bp->b_rcred = NOCRED;
123		bp->b_wcred = NOCRED;
124		bp->b_qindex = QUEUE_EMPTY;
125		bp->b_vnbufs.le_next = NOLIST;
126		bp->b_data = buffers_kva + i * MAXBSIZE;
127		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
128		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
129	}
130/*
131 * maxbufspace is currently calculated assuming that all filesystem blocks
132 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
133 * cache is still the same as it would be for 8K filesystems.  This
134 * keeps the size of the buffer cache "in check" for big block filesystems.
135 */
136	minbuf = nbuf / 3;
137	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
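	/*
	 * Illustrative arithmetic (assuming 4K pages, as on the i386):
	 * 2 * (nbuf + 8) * 4096 is roughly 8K of buffer space per buffer
	 * header, plus a little slack, which matches the 8K filesystem
	 * block assumption described above.
	 */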
138
139	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
140	bogus_page = vm_page_alloc(kernel_object,
141			bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
142
143}
144
145/*
146 * remove the buffer from the appropriate free list
147 */
148void
149bremfree(struct buf * bp)
150{
151	int s = splbio();
152
153	if (bp->b_qindex != QUEUE_NONE) {
154		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
155		bp->b_qindex = QUEUE_NONE;
156	} else {
157		panic("bremfree: removing a buffer when not on a queue");
158	}
159	splx(s);
160}
161
162/*
163 * Get a buffer with the specified data.  Look in the cache first.
164 */
165int
166bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
167    struct buf ** bpp)
168{
169	struct buf *bp;
170
171	bp = getblk(vp, blkno, size, 0, 0);
172	*bpp = bp;
173
174	/* if not found in cache, do some I/O */
175	if ((bp->b_flags & B_CACHE) == 0) {
176		if (curproc != NULL)
177			curproc->p_stats->p_ru.ru_inblock++;
178		bp->b_flags |= B_READ;
179		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
180		if (bp->b_rcred == NOCRED) {
181			if (cred != NOCRED)
182				crhold(cred);
183			bp->b_rcred = cred;
184		}
185		vfs_busy_pages(bp, 0);
186		VOP_STRATEGY(bp);
187		return (biowait(bp));
188	}
189	return (0);
190}
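
/*
 * Typical caller pattern for bread() (an illustrative sketch; vp, lblkno
 * and bsize are placeholders, not names from this file):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	brelse(bp);
 *
 * The buffer is returned busy in *bpp even on error, so the caller is
 * responsible for releasing it with brelse() (or bwrite()/bdwrite()).
 */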
191
192/*
193 * Operates like bread, but also starts asynchronous I/O on
194 * read-ahead blocks.
195 */
196int
197breadn(struct vnode * vp, daddr_t blkno, int size,
198    daddr_t * rablkno, int *rabsize,
199    int cnt, struct ucred * cred, struct buf ** bpp)
200{
201	struct buf *bp, *rabp;
202	int i;
203	int rv = 0, readwait = 0;
204
205	*bpp = bp = getblk(vp, blkno, size, 0, 0);
206
207	/* if not found in cache, do some I/O */
208	if ((bp->b_flags & B_CACHE) == 0) {
209		if (curproc != NULL)
210			curproc->p_stats->p_ru.ru_inblock++;
211		bp->b_flags |= B_READ;
212		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
213		if (bp->b_rcred == NOCRED) {
214			if (cred != NOCRED)
215				crhold(cred);
216			bp->b_rcred = cred;
217		}
218		vfs_busy_pages(bp, 0);
219		VOP_STRATEGY(bp);
220		++readwait;
221	}
222	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
223		if (inmem(vp, *rablkno))
224			continue;
225		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
226
227		if ((rabp->b_flags & B_CACHE) == 0) {
228			if (curproc != NULL)
229				curproc->p_stats->p_ru.ru_inblock++;
230			rabp->b_flags |= B_READ | B_ASYNC;
231			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
232			if (rabp->b_rcred == NOCRED) {
233				if (cred != NOCRED)
234					crhold(cred);
235				rabp->b_rcred = cred;
236			}
237			vfs_busy_pages(rabp, 0);
238			VOP_STRATEGY(rabp);
239		} else {
240			brelse(rabp);
241		}
242	}
243
244	if (readwait) {
245		rv = biowait(bp);
246	}
247	return (rv);
248}
249
250/*
251 * Write, release buffer on completion.  (Done by iodone
252 * if async.)
253 */
254int
255bwrite(struct buf * bp)
256{
257	int oldflags = bp->b_flags;
258
259	if (bp->b_flags & B_INVAL) {
260		brelse(bp);
261		return (0);
262	}
263	if (!(bp->b_flags & B_BUSY))
264		panic("bwrite: buffer is not busy???");
265
266	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
267	bp->b_flags |= B_WRITEINPROG;
268
269	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
270		reassignbuf(bp, bp->b_vp);
271	}
272
273	bp->b_vp->v_numoutput++;
274	vfs_busy_pages(bp, 1);
275	if (curproc != NULL)
276		curproc->p_stats->p_ru.ru_oublock++;
277	VOP_STRATEGY(bp);
278
279	if ((oldflags & B_ASYNC) == 0) {
280		int rtval = biowait(bp);
281
282		if (oldflags & B_DELWRI) {
283			reassignbuf(bp, bp->b_vp);
284		}
285		brelse(bp);
286		return (rtval);
287	}
288	return (0);
289}
290
291int
292vn_bwrite(ap)
293	struct vop_bwrite_args *ap;
294{
295	return (bwrite(ap->a_bp));
296}
297
298/*
299 * Delayed write. (Buffer is marked dirty).
300 */
301void
302bdwrite(struct buf * bp)
303{
304
305	if ((bp->b_flags & B_BUSY) == 0) {
306		panic("bdwrite: buffer is not busy");
307	}
308	if (bp->b_flags & B_INVAL) {
309		brelse(bp);
310		return;
311	}
312	if (bp->b_flags & B_TAPE) {
313		bawrite(bp);
314		return;
315	}
316	bp->b_flags &= ~(B_READ|B_RELBUF);
317	if ((bp->b_flags & B_DELWRI) == 0) {
318		bp->b_flags |= B_DONE | B_DELWRI;
319		reassignbuf(bp, bp->b_vp);
320	}
321
322	/*
323	 * Doing the bmap here keeps the system from needing to do it later,
324	 * perhaps when the system is attempting to do a sync.  Since it
325	 * is likely that the indirect block -- or whatever other data
326	 * structure the filesystem needs -- is still in memory now, this
327	 * is a good time to do it.  Note also that if the pageout daemon
328	 * is requesting a sync, there might not be enough memory to do
329	 * the bmap then, so this is important to do now.
330	 */
331	if( bp->b_lblkno == bp->b_blkno) {
332		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
333	}
334
335	/*
336	 * Set the *dirty* buffer range based upon the VM system dirty pages.
337	 */
338	vfs_setdirty(bp);
339
340	/*
341	 * We need to do this here to satisfy the vnode_pager and the
342	 * pageout daemon, so that they consider the pages to have been
343	 * "cleaned".  Note that since the pages are in a delayed write
344	 * buffer, the VFS layer will see that the pages get written
345	 * out on the next sync, or perhaps the cluster will be completed.
346	 */
347	vfs_clean_pages(bp);
348	brelse(bp);
349	return;
350}
351
352/*
353 * Asynchronous write.
354 * Start output on a buffer, but do not wait for it to complete.
355 * The buffer is released when the output completes.
356 */
357void
358bawrite(struct buf * bp)
359{
360	bp->b_flags |= B_ASYNC;
361	(void) VOP_BWRITE(bp);
362}
363
364/*
365 * Release a buffer.
366 */
367void
368brelse(struct buf * bp)
369{
370	int s;
371
372	if (bp->b_flags & B_CLUSTER) {
373		relpbuf(bp);
374		return;
375	}
376	/* anyone need a "free" block? */
377	s = splbio();
378
379	if (needsbuffer) {
380		needsbuffer = 0;
381		wakeup((caddr_t) &needsbuffer);
382	}
383
384	/* anyone need this block? */
385	if (bp->b_flags & B_WANTED) {
386		bp->b_flags &= ~(B_WANTED | B_AGE);
387		wakeup((caddr_t) bp);
388	} else if (bp->b_flags & B_VMIO) {
389		bp->b_flags &= ~B_WANTED;
390		wakeup((caddr_t) bp);
391	}
392	if (bp->b_flags & B_LOCKED)
393		bp->b_flags &= ~B_ERROR;
394
395	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
396	    (bp->b_bufsize <= 0)) {
397		bp->b_flags |= B_INVAL;
398		bp->b_flags &= ~(B_DELWRI | B_CACHE);
399		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
400			brelvp(bp);
401	}
402
403	/*
404	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
405	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
406	 * but the VM object is kept around.  The B_NOCACHE flag is used to
407	 * invalidate the pages in the VM object.
408	 */
409	if (bp->b_flags & B_VMIO) {
410		vm_offset_t foff;
411		vm_object_t obj;
412		int i, resid;
413		vm_page_t m;
414		int iototal = bp->b_bufsize;
415
416		foff = 0;
417		obj = 0;
418		if (bp->b_npages) {
419			if (bp->b_vp && bp->b_vp->v_mount) {
420				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
421			} else {
422				/*
423				 * vnode pointer has been ripped away --
424				 * probably file gone...
425				 */
426				foff = bp->b_pages[0]->offset;
427			}
428		}
429		for (i = 0; i < bp->b_npages; i++) {
430			m = bp->b_pages[i];
431			if (m == bogus_page) {
432				m = vm_page_lookup(obj, foff);
433				if (!m) {
434					panic("brelse: page missing\n");
435				}
436				bp->b_pages[i] = m;
437				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
438			}
439			resid = (m->offset + PAGE_SIZE) - foff;
440			if (resid > iototal)
441				resid = iototal;
442			if (resid > 0) {
443				/*
444				 * Don't invalidate the page if the local machine has already
445				 * modified it.  This is the lesser of two evils, and should
446				 * be fixed.
447				 */
448				if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
449					vm_page_test_dirty(m);
450					if (m->dirty == 0) {
451						vm_page_set_invalid(m, foff, resid);
452						if (m->valid == 0)
453							vm_page_protect(m, VM_PROT_NONE);
454					}
455				}
456			}
457			foff += resid;
458			iototal -= resid;
459		}
460
461		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
462			for(i=0;i<bp->b_npages;i++) {
463				m = bp->b_pages[i];
464				--m->bmapped;
465				if (m->bmapped == 0) {
466					if (m->flags & PG_WANTED) {
467						wakeup((caddr_t) m);
468						m->flags &= ~PG_WANTED;
469					}
470					vm_page_test_dirty(m);
471					if ((m->dirty & m->valid) == 0 &&
472						(m->flags & PG_REFERENCED) == 0 &&
473							!pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
474						vm_page_cache(m);
475					} else if ((m->flags & PG_ACTIVE) == 0) {
476						vm_page_activate(m);
477						m->act_count = 0;
478					}
479				}
480			}
481			bufspace -= bp->b_bufsize;
482			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
483			bp->b_npages = 0;
484			bp->b_bufsize = 0;
485			bp->b_flags &= ~B_VMIO;
486			if (bp->b_vp)
487				brelvp(bp);
488		}
489	}
490	if (bp->b_qindex != QUEUE_NONE)
491		panic("brelse: free buffer onto another queue???");
492
493	/* enqueue */
494	/* buffers with no memory */
495	if (bp->b_bufsize == 0) {
496		bp->b_qindex = QUEUE_EMPTY;
497		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
498		LIST_REMOVE(bp, b_hash);
499		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
500		bp->b_dev = NODEV;
501		/* buffers with junk contents */
502	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
503		bp->b_qindex = QUEUE_AGE;
504		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
505		LIST_REMOVE(bp, b_hash);
506		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
507		bp->b_dev = NODEV;
508		/* buffers that are locked */
509	} else if (bp->b_flags & B_LOCKED) {
510		bp->b_qindex = QUEUE_LOCKED;
511		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
512		/* buffers with stale but valid contents */
513	} else if (bp->b_flags & B_AGE) {
514		bp->b_qindex = QUEUE_AGE;
515		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
516		/* buffers with valid and quite potentially reusable contents */
517	} else {
518		bp->b_qindex = QUEUE_LRU;
519		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
520	}
521
522	/* unlock */
523	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
524	splx(s);
525}
526
527/*
528 * this routine implements clustered async writes for
529 * clearing out B_DELWRI buffers...  This is much better
530 * than the old way of writing only one buffer at a time.
531 */
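/*
 * Sizing note (illustrative): ncl can be at most MAXPHYS / size, so with
 * 8K filesystem blocks and an assumed MAXPHYS of 64K a single cluster
 * write covers at most 8 contiguous delayed-write, cluster-ok buffers
 * (including this one).
 */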
532void
533vfs_bio_awrite(struct buf * bp)
534{
535	int i;
536	daddr_t lblkno = bp->b_lblkno;
537	struct vnode *vp = bp->b_vp;
538	int s;
539	int ncl;
540	struct buf *bpa;
541
542	s = splbio();
543	if( vp->v_mount && (vp->v_flag & VVMIO) &&
544	    	(bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
545		int size = vp->v_mount->mnt_stat.f_iosize;
546
547		for (i = 1; i < MAXPHYS / size; i++) {
548			if ((bpa = incore(vp, lblkno + i)) &&
549			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
550			    (B_DELWRI | B_CLUSTEROK)) &&
551			    (bpa->b_bufsize == size)) {
552				if ((bpa->b_blkno == bpa->b_lblkno) ||
553				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
554					break;
555			} else {
556				break;
557			}
558		}
559		ncl = i;
560		/*
561		 * this is a possible cluster write
562		 */
563		if (ncl != 1) {
564			bremfree(bp);
565			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
566			splx(s);
567			return;
568		}
569	}
570	/*
571	 * default (old) behavior, writing out only one block
572	 */
573	bremfree(bp);
574	bp->b_flags |= B_BUSY | B_ASYNC;
575	(void) VOP_BWRITE(bp);
576	splx(s);
577}
578
579
580/*
581 * Find a buffer header which is available for use.
582 */
583static struct buf *
584getnewbuf(int slpflag, int slptimeo, int doingvmio)
585{
586	struct buf *bp;
587	int s;
588	int firstbp = 1;
589
590	s = splbio();
591start:
592	if (bufspace >= maxbufspace)
593		goto trytofreespace;
594
595	/* can we constitute a new buffer? */
596	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
597		if (bp->b_qindex != QUEUE_EMPTY)
598			panic("getnewbuf: inconsistent EMPTY queue");
599		bremfree(bp);
600		goto fillbuf;
601	}
602trytofreespace:
603	/*
604	 * We keep the file I/O from hogging metadata I/O.
605	 * This is desirable because file data is cached in the
606	 * VM/Buffer cache even if a buffer is freed.
607	 */
608	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
609		if (bp->b_qindex != QUEUE_AGE)
610			panic("getnewbuf: inconsistent AGE queue");
611	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
612		if (bp->b_qindex != QUEUE_LRU)
613			panic("getnewbuf: inconsistent LRU queue");
614	}
615	if (!bp) {
616		/* wait for a free buffer of any kind */
617		needsbuffer = 1;
618		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
619		splx(s);
620		return (0);
621	}
622
623	/* if we are a delayed write, convert to an async write */
624	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
625		vfs_bio_awrite(bp);
626		if (!slpflag && !slptimeo) {
627			splx(s);
628			return (0);
629		}
630		goto start;
631	}
632
633	if (bp->b_flags & B_WANTED) {
634		bp->b_flags &= ~B_WANTED;
635		wakeup((caddr_t) bp);
636	}
637	bremfree(bp);
638
639	if (bp->b_flags & B_VMIO) {
640		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
641		brelse(bp);
642		bremfree(bp);
643	}
644
645	if (bp->b_vp)
646		brelvp(bp);
647
648	/* we are not free, nor do we contain interesting data */
649	if (bp->b_rcred != NOCRED)
650		crfree(bp->b_rcred);
651	if (bp->b_wcred != NOCRED)
652		crfree(bp->b_wcred);
653fillbuf:
654	bp->b_flags |= B_BUSY;
655	LIST_REMOVE(bp, b_hash);
656	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
657	splx(s);
658	if (bp->b_bufsize) {
659		allocbuf(bp, 0);
660	}
661	bp->b_flags = B_BUSY;
662	bp->b_dev = NODEV;
663	bp->b_vp = NULL;
664	bp->b_blkno = bp->b_lblkno = 0;
665	bp->b_iodone = 0;
666	bp->b_error = 0;
667	bp->b_resid = 0;
668	bp->b_bcount = 0;
669	bp->b_npages = 0;
670	bp->b_wcred = bp->b_rcred = NOCRED;
671	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
672	bp->b_dirtyoff = bp->b_dirtyend = 0;
673	bp->b_validoff = bp->b_validend = 0;
674	if (bufspace >= maxbufspace) {
675		s = splbio();
676		bp->b_flags |= B_INVAL;
677		brelse(bp);
678		goto trytofreespace;
679	}
680	return (bp);
681}
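
/*
 * Note for callers (as used in this file): getnewbuf() can return NULL
 * after sleeping for a buffer or after pushing out a delayed write.
 * getblk() retries unless the caller asked for a bounded sleep, and
 * geteblk() simply loops until a buffer is available.
 */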
682
683/*
684 * Check to see if a block is currently memory resident.
685 */
686struct buf *
687incore(struct vnode * vp, daddr_t blkno)
688{
689	struct buf *bp;
690	struct bufhashhdr *bh;
691
692	int s = splbio();
693
694	bh = BUFHASH(vp, blkno);
695	bp = bh->lh_first;
696
697	/* Search hash chain */
698	while (bp) {
699		/* hit */
700		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
701		    (bp->b_flags & B_INVAL) == 0) {
702			splx(s);
703			return (bp);
704		}
705		bp = bp->b_hash.le_next;
706	}
707	splx(s);
708
709	return (0);
710}
711
712/*
713 * Returns true if no I/O is needed to access the
714 * associated VM object.  This is like incore except
715 * it also hunts around in the VM system for the data.
716 */
717
718int
719inmem(struct vnode * vp, daddr_t blkno)
720{
721	vm_object_t obj;
722	vm_offset_t off, toff, tinc;
723	vm_page_t m;
724
725	if (incore(vp, blkno))
726		return 1;
727	if (vp->v_mount == 0)
728		return 0;
729	if ((vp->v_object == 0) || (vp->v_flag & VVMIO) == 0)
730		return 0;
731
732	obj = vp->v_object;
733	tinc = PAGE_SIZE;
734	if (tinc > vp->v_mount->mnt_stat.f_iosize)
735		tinc = vp->v_mount->mnt_stat.f_iosize;
736	off = blkno * vp->v_mount->mnt_stat.f_iosize;
737
738	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
739		int mask;
740
741		m = vm_page_lookup(obj, trunc_page(toff + off));
742		if (!m)
743			return 0;
744		if (vm_page_is_valid(m, toff + off, tinc) == 0)
745			return 0;
746	}
747	return 1;
748}
749
750/*
751 * now we set the dirty range for the buffer --
752 * for NFS -- if the file is mapped and pages have
753 * been written to, let it know.  We want the
754 * entire range of the buffer to be marked dirty if
755 * any of the pages have been written to, for consistency
756 * with the b_validoff, b_validend set in the nfs write
757 * code, and used by the nfs read code.
758 */
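/*
 * Worked example (illustrative): for a buffer spanning four pages in
 * which only pages 1 and 2 (0-based) test dirty, the forward scan below
 * yields a candidate offset of 1 * PAGE_SIZE and the backward scan a
 * candidate end of (2 + 1) * PAGE_SIZE.  These candidates only ever
 * widen the existing b_dirtyoff/b_dirtyend range, never shrink it.
 */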
759static void
760vfs_setdirty(struct buf *bp) {
761	int i;
762	vm_object_t object;
763	vm_offset_t boffset, offset;
764	/*
765	 * We qualify the scan for modified pages on whether the
766	 * object has been flushed yet.  The OBJ_WRITEABLE flag
767	 * is not cleared simply by protecting pages off.
768	 */
769	if ((bp->b_flags & B_VMIO) &&
770		((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
771		/*
772		 * test the pages to see if they have been modified directly
773		 * by users through the VM system.
774		 */
775		for (i = 0; i < bp->b_npages; i++)
776			vm_page_test_dirty(bp->b_pages[i]);
777
778		/*
779		 * scan forwards for the first page modified
780		 */
781		for (i = 0; i < bp->b_npages; i++) {
782			if (bp->b_pages[i]->dirty) {
783				break;
784			}
785		}
786		boffset = i * PAGE_SIZE;
787		if (boffset < bp->b_dirtyoff) {
788			bp->b_dirtyoff = boffset;
789		}
790
791		/*
792		 * scan backwards for the last page modified
793		 */
794		for (i = bp->b_npages - 1; i >= 0; --i) {
795			if (bp->b_pages[i]->dirty) {
796				break;
797			}
798		}
799		boffset = (i + 1) * PAGE_SIZE;
800		offset = boffset + bp->b_pages[0]->offset;
801		if (offset >= object->size) {
802			boffset = object->size - bp->b_pages[0]->offset;
803		}
804		if (bp->b_dirtyend < boffset) {
805			bp->b_dirtyend = boffset;
806		}
807	}
808}
809
810/*
811 * Get a block given a specified block and offset into a file/device.
812 */
813struct buf *
814getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
815{
816	struct buf *bp;
817	int s;
818	struct bufhashhdr *bh;
819	vm_offset_t off;
820	int nleft;
821
822	s = splbio();
823loop:
824	if (bp = incore(vp, blkno)) {
825		if (bp->b_flags & B_BUSY) {
826			bp->b_flags |= B_WANTED;
827			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
828				goto loop;
829
830			splx(s);
831			return (struct buf *) NULL;
832		}
833		bp->b_flags |= B_BUSY | B_CACHE;
834		bremfree(bp);
835		/*
836		 * check for size inconsistencies
837		 */
838		if (bp->b_bcount != size) {
839			if (bp->b_flags & B_VMIO) {
840				allocbuf(bp, size);
841			} else {
842				bp->b_flags |= B_NOCACHE;
843				VOP_BWRITE(bp);
844				goto loop;
845			}
846		}
847		splx(s);
848		return (bp);
849	} else {
850		vm_object_t obj;
851		int doingvmio;
852
853		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
854			doingvmio = 1;
855		} else {
856			doingvmio = 0;
857		}
858		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
859			if (slpflag || slptimeo)
860				return NULL;
861			goto loop;
862		}
863
864		/*
865		 * This code is used to make sure that a buffer is not
866		 * created while the getnewbuf routine is blocked.
867		 * Normally the vnode is locked so this isn't a problem.
868		 * VBLK type I/O requests, however, don't lock the vnode.
869		 */
870		if (!VOP_ISLOCKED(vp) && incore(vp, blkno)) {
871			bp->b_flags |= B_INVAL;
872			brelse(bp);
873			goto loop;
874		}
875
876		/*
877		 * Insert the buffer into the hash, so that it can
878		 * be found by incore.
879		 */
880		bp->b_blkno = bp->b_lblkno = blkno;
881		bgetvp(vp, bp);
882		LIST_REMOVE(bp, b_hash);
883		bh = BUFHASH(vp, blkno);
884		LIST_INSERT_HEAD(bh, bp, b_hash);
885
886		if (doingvmio) {
887			bp->b_flags |= (B_VMIO | B_CACHE);
888#if defined(VFS_BIO_DEBUG)
889			if (vp->v_type != VREG)
890				printf("getblk: vmioing file type %d???\n", vp->v_type);
891#endif
892		} else {
893			bp->b_flags &= ~B_VMIO;
894		}
895		splx(s);
896
897		allocbuf(bp, size);
898		return (bp);
899	}
900}
901
902/*
903 * Get an empty, disassociated buffer of given size.
904 */
905struct buf *
906geteblk(int size)
907{
908	struct buf *bp;
909
910	while ((bp = getnewbuf(0, 0, 0)) == 0);
911	allocbuf(bp, size);
912	bp->b_flags |= B_INVAL;
913	return (bp);
914}
915
916/*
917 * This code constitutes the buffer memory from either anonymous system
918 * memory (in the case of non-VMIO operations) or from an associated
919 * VM object (in the case of VMIO operations).
920 *
921 * Note that this code is tricky, and has many complications to resolve
922 * deadlock or inconsistent data situations.  Tread lightly!!!
923 *
924 * Modify the length of a buffer's underlying buffer storage without
925 * destroying information (unless, of course the buffer is shrinking).
926 */
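/*
 * Usage within this file (for orientation): getblk() and geteblk() call
 * allocbuf(bp, size) to size a newly constituted or resized buffer, and
 * getnewbuf() calls allocbuf(bp, 0) to release a recycled buffer's old
 * storage before it is reused.
 */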
927int
928allocbuf(struct buf * bp, int size)
929{
930
931	int s;
932	int newbsize, mbsize;
933	int i;
934
935	if ((bp->b_flags & B_VMIO) == 0) {
936		/*
937		 * Just get anonymous memory from the kernel
938		 */
939		mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
940		newbsize = round_page(size);
941
942		if (newbsize < bp->b_bufsize) {
943			vm_hold_free_pages(
944			    bp,
945			    (vm_offset_t) bp->b_data + newbsize,
946			    (vm_offset_t) bp->b_data + bp->b_bufsize);
947		} else if (newbsize > bp->b_bufsize) {
948			vm_hold_load_pages(
949			    bp,
950			    (vm_offset_t) bp->b_data + bp->b_bufsize,
951			    (vm_offset_t) bp->b_data + newbsize);
952		}
953	} else {
954		vm_page_t m;
955		int desiredpages;
956
957		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
958		desiredpages = round_page(newbsize) / PAGE_SIZE;
959
960		if (newbsize < bp->b_bufsize) {
961			if (desiredpages < bp->b_npages) {
962				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
963				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
964				for (i = desiredpages; i < bp->b_npages; i++) {
965					m = bp->b_pages[i];
966					s = splhigh();
967					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
968						m->flags |= PG_WANTED;
969						tsleep(m, PVM, "biodep", 0);
970					}
971					splx(s);
972
973					if (m->bmapped == 0) {
974						printf("allocbuf: bmapped is zero for page %d\n", i);
975						panic("allocbuf: error");
976					}
977					--m->bmapped;
978					if (m->bmapped == 0) {
979						vm_page_protect(m, VM_PROT_NONE);
980						vm_page_free(m);
981					}
982					bp->b_pages[i] = NULL;
983				}
984				bp->b_npages = desiredpages;
985			}
986		} else if (newbsize > bp->b_bufsize) {
987			vm_object_t obj;
988			vm_offset_t tinc, off, toff, objoff;
989			int pageindex, curbpnpages;
990			struct vnode *vp;
991			int bsize;
992
993			vp = bp->b_vp;
994			bsize = vp->v_mount->mnt_stat.f_iosize;
995
996			if (bp->b_npages < desiredpages) {
997				obj = vp->v_object;
998				tinc = PAGE_SIZE;
999				if (tinc > bsize)
1000					tinc = bsize;
1001				off = bp->b_lblkno * bsize;
1002				curbpnpages = bp->b_npages;
1003		doretry:
1004				bp->b_flags |= B_CACHE;
1005				for (toff = 0; toff < newbsize; toff += tinc) {
1006					int mask;
1007					int bytesinpage;
1008
1009					pageindex = toff / PAGE_SIZE;
1010					objoff = trunc_page(toff + off);
1011					if (pageindex < curbpnpages) {
1012						int pb;
1013
1014						m = bp->b_pages[pageindex];
1015						if (m->offset != objoff)
1016							panic("allocbuf: page changed offset??!!!?");
1017						bytesinpage = tinc;
1018						if (tinc > (newbsize - toff))
1019							bytesinpage = newbsize - toff;
1020						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1021							bp->b_flags &= ~B_CACHE;
1022						}
1023						if ((m->flags & PG_ACTIVE) == 0) {
1024							vm_page_activate(m);
1025							m->act_count = 0;
1026						}
1027						continue;
1028					}
1029					m = vm_page_lookup(obj, objoff);
1030					if (!m) {
1031						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1032						if (!m) {
1033							int j;
1034
1035							for (j = bp->b_npages; j < pageindex; j++) {
1036								PAGE_WAKEUP(bp->b_pages[j]);
1037							}
1038							VM_WAIT;
1039							curbpnpages = bp->b_npages;
1040							goto doretry;
1041						}
1042						vm_page_activate(m);
1043						m->act_count = 0;
1044						m->valid = 0;
1045						bp->b_flags &= ~B_CACHE;
1046					} else if (m->flags & PG_BUSY) {
1047						int j;
1048
1049						for (j = bp->b_npages; j < pageindex; j++) {
1050							PAGE_WAKEUP(bp->b_pages[j]);
1051						}
1052
1053						s = splbio();
1054						m->flags |= PG_WANTED;
1055						tsleep(m, PRIBIO, "pgtblk", 0);
1056						splx(s);
1057
1058						curbpnpages = bp->b_npages;
1059						goto doretry;
1060					} else {
1061						int pb;
1062						if ((curproc != pageproc) &&
1063							(m->flags & PG_CACHE) &&
1064						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
1065							pagedaemon_wakeup();
1066						}
1067						bytesinpage = tinc;
1068						if (tinc > (newbsize - toff))
1069							bytesinpage = newbsize - toff;
1070						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1071							bp->b_flags &= ~B_CACHE;
1072						}
1073						if ((m->flags & PG_ACTIVE) == 0) {
1074							vm_page_activate(m);
1075							m->act_count = 0;
1076						}
1077						m->flags |= PG_BUSY;
1078					}
1079					bp->b_pages[pageindex] = m;
1080					curbpnpages = pageindex + 1;
1081				}
1082				for (i = bp->b_npages; i < curbpnpages; i++) {
1083					m = bp->b_pages[i];
1084					m->bmapped++;
1085					PAGE_WAKEUP(m);
1086				}
1087				bp->b_npages = curbpnpages;
1088				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
1089				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
1090				bp->b_data += off % PAGE_SIZE;
1091			}
1092		}
1093	}
1094	bufspace += (newbsize - bp->b_bufsize);
1095	bp->b_bufsize = newbsize;
1096	bp->b_bcount = size;
1097	return 1;
1098}
1099
1100/*
1101 * Wait for buffer I/O completion, returning error status.
1102 */
1103int
1104biowait(register struct buf * bp)
1105{
1106	int s;
1107
1108	s = splbio();
1109	while ((bp->b_flags & B_DONE) == 0)
1110		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
1111	splx(s);
1112	if (bp->b_flags & B_EINTR) {
1113		bp->b_flags &= ~B_EINTR;
1114		return (EINTR);
1115	}
1116	if (bp->b_flags & B_ERROR) {
1117		return (bp->b_error ? bp->b_error : EIO);
1118	} else {
1119		return (0);
1120	}
1121}
1122
1123/*
1124 * Finish I/O on a buffer, calling an optional function.
1125 * This is usually called from interrupt level, so process blocking
1126 * is not *a good idea*.
1127 */
1128void
1129biodone(register struct buf * bp)
1130{
1131	int s;
1132
1133	s = splbio();
1134	if (bp->b_flags & B_DONE) {
1135		splx(s);
1136		printf("biodone: buffer already done\n");
1137		return;
1138	}
1139	bp->b_flags |= B_DONE;
1140
1141	if ((bp->b_flags & B_READ) == 0) {
1142		struct vnode *vp = bp->b_vp;
1143		vwakeup(bp);
1144	}
1145#ifdef BOUNCE_BUFFERS
1146	if (bp->b_flags & B_BOUNCE)
1147		vm_bounce_free(bp);
1148#endif
1149
1150	/* call optional completion function if requested */
1151	if (bp->b_flags & B_CALL) {
1152		bp->b_flags &= ~B_CALL;
1153		(*bp->b_iodone) (bp);
1154		splx(s);
1155		return;
1156	}
1157	if (bp->b_flags & B_VMIO) {
1158		int i, resid;
1159		vm_offset_t foff;
1160		vm_page_t m;
1161		vm_object_t obj;
1162		int iosize;
1163		struct vnode *vp = bp->b_vp;
1164
1165		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1166		obj = vp->v_object;
1167		if (!obj) {
1168			return;
1169		}
1170#if defined(VFS_BIO_DEBUG)
1171		if (obj->paging_in_progress < bp->b_npages) {
1172			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1173			    obj->paging_in_progress, bp->b_npages);
1174		}
1175#endif
1176		iosize = bp->b_bufsize;
1177		for (i = 0; i < bp->b_npages; i++) {
1178			int bogusflag = 0;
1179			m = bp->b_pages[i];
1180			if (m == bogus_page) {
1181				bogusflag = 1;
1182				m = vm_page_lookup(obj, foff);
1183				if (!m) {
1184#if defined(VFS_BIO_DEBUG)
1185					printf("biodone: page disappeared\n");
1186#endif
1187					--obj->paging_in_progress;
1188					continue;
1189				}
1190				bp->b_pages[i] = m;
1191				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1192			}
1193#if defined(VFS_BIO_DEBUG)
1194			if (trunc_page(foff) != m->offset) {
1195				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
1196			}
1197#endif
1198			resid = (m->offset + PAGE_SIZE) - foff;
1199			if (resid > iosize)
1200				resid = iosize;
1201			/*
1202			 * In the write case, the valid and clean bits are
1203			 * already changed correctly, so we only need to do this
1204			 * here in the read case.
1205			 */
1206			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1207				vm_page_set_valid(m, foff & (PAGE_SIZE-1), resid);
1208				vm_page_set_clean(m, foff & (PAGE_SIZE-1), resid);
1209			}
1210
1211			/*
1212			 * when debugging new filesystems or buffer I/O methods, this
1213			 * is the most common error that pops up.  If you see this, you
1214			 * have not set the page busy flag correctly!!!
1215			 */
1216			if (m->busy == 0) {
1217				printf("biodone: page busy < 0, "
1218				    "off: %ld, foff: %ld, "
1219				    "resid: %d, index: %d\n",
1220				    m->offset, foff, resid, i);
1221				printf(" iosize: %ld, lblkno: %ld\n",
1222				    bp->b_vp->v_mount->mnt_stat.f_iosize,
1223				    bp->b_lblkno);
1224				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
1225				    m->valid, m->dirty, m->bmapped);
1226				panic("biodone: page busy < 0\n");
1227			}
1228			--m->busy;
1229			if( (m->busy == 0) && (m->flags & PG_WANTED))
1230				wakeup((caddr_t) m);
1231			--obj->paging_in_progress;
1232			foff += resid;
1233			iosize -= resid;
1234		}
1235		if (obj && obj->paging_in_progress == 0 &&
1236		    (obj->flags & OBJ_PIPWNT)) {
1237			obj->flags &= ~OBJ_PIPWNT;
1238			wakeup((caddr_t) obj);
1239		}
1240	}
1241	/*
1242	 * For asynchronous completions, release the buffer now. The brelse
1243	 * checks for B_WANTED and will do the wakeup there if necessary - so
1244	 * no need to do a wakeup here in the async case.
1245	 */
1246
1247	if (bp->b_flags & B_ASYNC) {
1248		brelse(bp);
1249	} else {
1250		bp->b_flags &= ~B_WANTED;
1251		wakeup((caddr_t) bp);
1252	}
1253	splx(s);
1254}
1255
1256int
1257count_lock_queue()
1258{
1259	int count;
1260	struct buf *bp;
1261
1262	count = 0;
1263	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
1264	    bp != NULL;
1265	    bp = bp->b_freelist.tqe_next)
1266		count++;
1267	return (count);
1268}
1269
1270int vfs_update_interval = 30;
1271
1272void
1273vfs_update()
1274{
1275	(void) spl0();
1276	while (1) {
1277		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
1278		    hz * vfs_update_interval);
1279		vfs_update_wakeup = 0;
1280		sync(curproc, NULL, NULL);
1281	}
1282}
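
/*
 * Illustrative note: with the default vfs_update_interval of 30 the loop
 * above calls sync() roughly every 30 seconds; a wakeup on
 * &vfs_update_wakeup (declared near the top of this file) forces an
 * earlier pass.
 */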
1283
1284/*
1285 * This routine is called in lieu of iodone in the case of
1286 * incomplete I/O.  This keeps the busy status for pages
1287 * consistent.
1288 */
1289void
1290vfs_unbusy_pages(struct buf * bp)
1291{
1292	int i;
1293
1294	if (bp->b_flags & B_VMIO) {
1295		struct vnode *vp = bp->b_vp;
1296		vm_object_t obj = vp->v_object;
1297		vm_offset_t foff;
1298
1299		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1300
1301		for (i = 0; i < bp->b_npages; i++) {
1302			vm_page_t m = bp->b_pages[i];
1303
1304			if (m == bogus_page) {
1305				m = vm_page_lookup(obj, foff);
1306				if (!m) {
1307					panic("vfs_unbusy_pages: page missing\n");
1308				}
1309				bp->b_pages[i] = m;
1310				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1311			}
1312			--obj->paging_in_progress;
1313			--m->busy;
1314			if( (m->busy == 0) && (m->flags & PG_WANTED))
1315				wakeup((caddr_t) m);
1316		}
1317		if (obj->paging_in_progress == 0 &&
1318		    (obj->flags & OBJ_PIPWNT)) {
1319			obj->flags &= ~OBJ_PIPWNT;
1320			wakeup((caddr_t) obj);
1321		}
1322	}
1323}
1324
1325/*
1326 * This routine is called before a device strategy routine.
1327 * It is used to tell the VM system that paging I/O is in
1328 * progress, and treat the pages associated with the buffer
1329 * almost as being PG_BUSY.  Also the object paging_in_progress
1330 * flag is handled to make sure that the object doesn't become
1331 * inconsistent.
1332 */
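/*
 * Lifecycle sketch (as used in this file): bread(), breadn() and bwrite()
 * call vfs_busy_pages() immediately before VOP_STRATEGY(); biodone() (or
 * vfs_unbusy_pages() for an I/O that is backed out) later drops the
 * per-page busy counts and the object's paging_in_progress count.
 */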
1333void
1334vfs_busy_pages(struct buf * bp, int clear_modify)
1335{
1336	int i;
1337
1338	if (bp->b_flags & B_VMIO) {
1339		vm_object_t obj = bp->b_vp->v_object;
1340		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1341		int iocount = bp->b_bufsize;
1342
1343		vfs_setdirty(bp);
1344		for (i = 0; i < bp->b_npages; i++) {
1345			vm_page_t m = bp->b_pages[i];
1346			int resid = (m->offset + PAGE_SIZE) - foff;
1347
1348			if (resid > iocount)
1349				resid = iocount;
1350			obj->paging_in_progress++;
1351			m->busy++;
1352			if (clear_modify) {
1353				vm_page_protect(m, VM_PROT_READ);
1354				vm_page_set_valid(m,
1355					foff & (PAGE_SIZE-1), resid);
1356				vm_page_set_clean(m,
1357					foff & (PAGE_SIZE-1), resid);
1358			} else if (bp->b_bcount >= PAGE_SIZE) {
1359				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
1360					bp->b_pages[i] = bogus_page;
1361					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1362				}
1363			}
1364			foff += resid;
1365			iocount -= resid;
1366		}
1367	}
1368}
1369
1370/*
1371 * Tell the VM system that the pages associated with this buffer
1372 * are clean.  This is used for delayed writes where the data is
1373 * going to go to disk eventually without additional VM intervention.
1374 */
1375void
1376vfs_clean_pages(struct buf * bp)
1377{
1378	int i;
1379
1380	if (bp->b_flags & B_VMIO) {
1381		vm_offset_t foff =
1382			bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1383		int iocount = bp->b_bufsize;
1384
1385		for (i = 0; i < bp->b_npages; i++) {
1386			vm_page_t m = bp->b_pages[i];
1387			int resid = (m->offset + PAGE_SIZE) - foff;
1388
1389			if (resid > iocount)
1390				resid = iocount;
1391			if (resid > 0) {
1392				vm_page_set_valid(m,
1393					foff & (PAGE_SIZE-1), resid);
1394				vm_page_set_clean(m,
1395					foff & (PAGE_SIZE-1), resid);
1396			}
1397			foff += resid;
1398			iocount -= resid;
1399		}
1400	}
1401}
1402
1403void
1404vfs_bio_clrbuf(struct buf *bp) {
1405	int i;
1406	if( bp->b_flags & B_VMIO) {
1407		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
1408			int j;
1409			if( bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
1410				for(j=0; j < bp->b_bufsize / DEV_BSIZE;j++) {
1411					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
1412				}
1413			}
1414			bp->b_resid = 0;
1415			return;
1416		}
1417		for(i=0;i<bp->b_npages;i++) {
1418			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
1419				continue;
1420			if( bp->b_pages[i]->valid == 0) {
1421				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
1422			} else {
1423				int j;
1424				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
1425					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
1426						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
1427				}
1428			}
1429			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
1430		}
1431		bp->b_resid = 0;
1432	} else {
1433		clrbuf(bp);
1434	}
1435}
1436
1437/*
1438 * vm_hold_load_pages and vm_hold_free_pages get pages into
1439 * a buffer's address space.  The pages are anonymous and are
1440 * not associated with a file object.
1441 */
1442void
1443vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1444{
1445	vm_offset_t pg;
1446	vm_page_t p;
1447	vm_offset_t from = round_page(froma);
1448	vm_offset_t to = round_page(toa);
1449
1450	for (pg = from; pg < to; pg += PAGE_SIZE) {
1451
1452tryagain:
1453
1454		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
1455		    VM_ALLOC_NORMAL);
1456		if (!p) {
1457			VM_WAIT;
1458			goto tryagain;
1459		}
1460		vm_page_wire(p);
1461		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
1462		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
1463		PAGE_WAKEUP(p);
1464		bp->b_npages++;
1465	}
1466}
1467
1468void
1469vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1470{
1471	vm_offset_t pg;
1472	vm_page_t p;
1473	vm_offset_t from = round_page(froma);
1474	vm_offset_t to = round_page(toa);
1475
1476	for (pg = from; pg < to; pg += PAGE_SIZE) {
1477		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
1478		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
1479		pmap_kremove(pg);
1480		vm_page_free(p);
1481		--bp->b_npages;
1482	}
1483}
1484