/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *		John S. Dyson.
 *
 * $FreeBSD: head/sys/kern/vfs_bio.c 92461 2002-03-17 00:56:41Z jake $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See buf(9) for more information.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct	bio_ops bioops;		/* I/O operation notification */

struct	buf_ops buf_ops_bio = {
	"buf_ops_bio",
	bwrite
};

/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */
struct mtx buftimelock;		/* Interlock on setting prio and timo */

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void vfs_backgroundwritedone(struct buf *bp);
static int flushbufqueues(void);
static void buf_daemon __P((void));

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
int runningbufspace;
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer io");
static int bufspace;
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "KVA memory used for bufs");
static int maxbufspace;
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static int bufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static int maxbufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static int lobufspace;
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
static int hibufspace;
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static int lorunningspace;
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static int hirunningspace;
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
   "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
   "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
   "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");
static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BX_BKGRDWRITE flag)?");

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Offset for bogus_page.
 * XXX bogus_offset should be local to bufinit
 */
static vm_offset_t bogus_offset;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Mask for index into the buffer hash table, which needs to be power of 2 in
 * size.  Set in kern_vfs_bio_buffer_alloc.
 */
static int bufhashmask;

/*
 * Hash table for all buffers, with a linked list hanging from each table
 * entry.  Set in kern_vfs_bio_buffer_alloc, initialized in buf_init.
 */
static LIST_HEAD(bufhashhdr, buf) *bufhashtbl;

/*
 * Somewhere to store buffers when they are not in another list, to always
 * have them in a list (and thus being able to use the same set of operations
 * on them.)
 */
static struct bufhashhdr invalhash;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_LOCKED	1	/* locked buffers */
#define QUEUE_CLEAN	2	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	3	/* B_DELWRI buffers */
#define QUEUE_EMPTYKVA	4	/* empty buffer headers w/KVA assignment */
#define QUEUE_EMPTY	5	/* empty buffer headers */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referred from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
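
/*
 * These flags are or'ed into needsbuffer by code that sleeps waiting for
 * buffers (e.g. bwillwrite() and getnewbuf()) and are cleared by the
 * matching wakeup routines below (numdirtywakeup(), bufspacewakeup(),
 * bufcountwakeup()) once the condition they describe has been satisfied.
 */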

/*
 * Buffer hash table code.  Note that the logical block scans linearly, which
 * gives us some L1 cache locality.
 */
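/*
 * The hash key mixes the vnode pointer with the logical block number; the
 * pointer is shifted right first, presumably because its low-order bits are
 * poor hash input (vnodes are allocated with a common size and alignment).
 */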

static __inline
struct bufhashhdr *
bufhash(struct vnode *vnp, daddr_t bn)
{
	return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]);
}

/*
 *	numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */

static __inline void
numdirtywakeup(int level)
{
	if (numdirtybuffers <= level) {
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);
		}
	}
}

/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
}

/*
 * runningbufwakeup() - in-progress I/O accounting.
 *
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
	if (bp->b_runningbufspace) {
		runningbufspace -= bp->b_runningbufspace;
		bp->b_runningbufspace = 0;
		if (runningbufreq && runningbufspace <= lorunningspace) {
			runningbufreq = 0;
			wakeup(&runningbufreq);
		}
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{
	++numfreebuffers;
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
}

/*
 *	waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	Reads will adjust runningbufspace, but will not block based on it.
 *	The read load has a side effect of reducing the allowed write load.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
static __inline void
waitrunningbufspace(void)
{
	/*
	 * XXX race against wakeup interrupt, currently
	 * protected by Giant.  FIXME!
	 */
	while (runningbufspace > hirunningspace) {
		++runningbufreq;
		tsleep(&runningbufreq, PVM, "wdrain", 0);
	}
}
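
/*
 * Taken together, waitrunningbufspace() and runningbufwakeup() form a simple
 * hysteresis: async writers block once runningbufspace exceeds hirunningspace
 * and are only woken again after enough in-flight I/O completes to bring
 * runningbufspace back down to lorunningspace.
 */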


/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	GIANT_REQUIRED;

	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/* Wake up the buffer daemon if necessary */
static __inline__
void
bd_wakeup(int dirtybuflevel)
{
	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
		bd_request = 1;
		wakeup(&bd_request);
	}
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */

static __inline__
void
bd_speedup(void)
{
	bd_wakeup(1);
}

/*
 * Calculate buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, int physmem_est)
{
	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
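	/*
	 * Worked example (assuming the stock 16K BKVASIZE, so factor == 64):
	 * with physmem_est of 131072K (128MB), nbuf starts at 50, gains
	 * min((131072 - 4096) / 64, 65536 / 64) = 1024 for the first 64MB,
	 * plus (131072 - 65536) * 2 / 320 = 409 for the remainder, giving
	 * roughly 1483 buffers before the maxbcache clamp is considered.
	 */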
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += (physmem_est - 65536) * 2 / (factor * 5);

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
	}

#if 0
	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}
#endif

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	/*
	 * Calculate the hash table size and reserve space
	 */
	for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
		;
	bufhashtbl = (void *)v;
	v = (caddr_t)(bufhashtbl + bufhashmask);
	--bufhashmask;
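	/*
	 * Continuing the example above: an nbuf of roughly 1483 makes the
	 * loop stop at 512 (the first power of two not below nbuf / 4 = 370),
	 * so the table gets 512 chains and bufhashmask ends up as 511.
	 */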

	return(v);
}

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	GIANT_REQUIRED;

	LIST_INIT(&invalhash);
	mtx_init(&buftimelock, "buftime lock", MTX_DEF);

	for (i = 0; i <= bufhashmask; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = nbuf * BKVASIZE;
	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;
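	/*
	 * With the example nbuf of ~1483 and a 16K BKVASIZE (an assumption,
	 * see above) this yields a maxbufspace of about 23MB; hibufspace then
	 * sits MAXBSIZE * 10 (640K) below it and lobufspace one MAXBSIZE
	 * (64K) below that.
	 */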

	lorunningspace = 512 * 1024;
	hirunningspace = 1024 * 1024;

/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	hidirtybuffers = nbuf / 4 + 20;
	numdirtybuffers = 0;
/*
 * To support extreme low-memory systems, make sure hidirtybuffers cannot
 * eat up all available buffer space.  This occurs when our minimum cannot
 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
 * BKVASIZE'd (8K) buffers.
 */
	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;

/*
 * Try to keep the number of free buffers in the specified range,
 * and give special processes (e.g. like buf_daemon) access to an
 * emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

/*
 * Maximum number of async ops initiated per buf_daemon loop.  This is
 * somewhat of a hack at the moment, we really need to limit ourselves
 * based on the number of bytes of I/O in-transit that were initiated
 * from buf_daemon.
 */

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
	cnt.v_wire_count++;
}

/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Must be called at splbio() or higher as this is the only locking for
 *	buffer_map.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf * bp)
{
	GIANT_REQUIRED;

	if (bp->b_kvasize) {
		++buffreekvacnt;
		bufspace -= bp->b_kvasize;
		vm_map_delete(buffer_map,
		    (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
		);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();
	int old_qindex = bp->b_qindex;

	GIANT_REQUIRED;

	if (bp->b_qindex != QUEUE_NONE) {
		KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		if (BUF_REFCNT(bp) <= 1)
			panic("bremfree: removing a buffer not on a queue");
	}

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, and it was on one of the free queues, the buffer
	 * was counted as free and we must decrement numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		switch(old_qindex) {
		case QUEUE_DIRTY:
		case QUEUE_CLEAN:
		case QUEUE_EMPTY:
		case QUEUE_EMPTYKVA:
			--numfreebuffers;
			break;
		default:
			break;
		}
	}
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).  This is really just a special case of breadn().
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{

	return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
}
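
/*
 * A typical caller pattern (a sketch, not code from this file): read a
 * block, bail out via brelse() if the read failed, otherwise use b_data
 * and release the buffer when done.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	bqrelse(bp);
 */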

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
 * to initiating I/O.  If B_CACHE is set, the buffer is valid
 * and we do not have to do anything.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curthread != PCPU_GET(idlethread))
			curthread->td_proc->p_stats->p_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curthread != PCPU_GET(idlethread))
				curthread->td_proc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */

int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct buf *newbp;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

	if (BUF_REFCNT(bp) == 0)
		panic("bwrite: buffer is not busy???");
	s = splbio();
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	if (bp->b_xflags & BX_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			splx(s);
			bdwrite(bp);
			return (0);
		}
		bp->b_xflags |= BX_BKGRDWAIT;
		tsleep(&bp->b_xflags, PRIBIO, "biord", 0);
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("bwrite: still writing");
	}

	/* Mark the buffer clean */
	bundirty(bp);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		if (bp->b_iodone != NULL) {
			printf("bp->b_iodone = %p\n", bp->b_iodone);
			panic("bwrite: need chained iodone");
		}

		/* get a new block */
		newbp = geteblk(bp->b_bufsize);

		/* set it to be identical to the old block */
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		bgetvp(bp->b_vp, newbp);
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = vfs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;

		/* move over the dependencies */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_movedeps(bp, newbp);

		/*
		 * Initiate write on the copy, release the original to
		 * the B_LOCKED queue so that it cannot go away until
		 * the background write completes. If not locked it could go
		 * away and then be reconstituted while it was being written.
		 * If the reconstituted buffer were written, we could end up
		 * with two background copies being written at the same time.
		 */
		bp->b_xflags |= BX_BKGRDINPROG;
		bp->b_flags |= B_LOCKED;
		bqrelse(bp);
		bp = newbp;
	}

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_WRITEINPROG | B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	if (curthread != PCPU_GET(idlethread))
		curthread->td_proc->p_stats->p_ru.ru_oublock++;
	splx(s);
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	} else if ((oldflags & B_NOWDRAIN) == 0) {
		/*
		 * don't allow the async write to saturate the I/O
		 * system.  Deadlocks can occur only if a device strategy
		 * routine (like in MD) turns around and issues another
		 * high-level write, in which case B_NOWDRAIN is expected
		 * to be set.  Otherwise we will not deadlock here because
		 * we are blocking waiting for I/O that is already in-progress
		 * to complete.
		 */
		waitrunningbufspace();
	}

	return (0);
}

/*
 * Complete a background write started from bwrite.
 */
static void
vfs_backgroundwritedone(bp)
	struct buf *bp;
{
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_movedeps(bp, origbp);
	/*
	 * Clear the BX_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BX_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_xflags & BX_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_xflags &= ~BX_BKGRDINPROG;
	if (origbp->b_xflags & BX_BKGRDWAIT) {
		origbp->b_xflags &= ~BX_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	/*
	 * Clear the B_LOCKED flag and remove it from the locked
	 * queue if it currently resides there.
	 */
	origbp->b_flags &= ~B_LOCKED;
	if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
		bremfree(origbp);
		bqrelse(origbp);
	}
	/*
	 * This buffer is marked B_NOCACHE, so when it is released
	 * by biodone, it will be tossed. We mark it with BIO_READ
	 * to avoid biodone doing a second vwakeup.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_iocmd = BIO_READ;
	bp->b_flags &= ~(B_CACHE | B_DONE);
	bp->b_iodone = 0;
	bufdone(bp);
}

/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf * bp)
{
	GIANT_REQUIRED;

	if (BUF_REFCNT(bp) == 0)
		panic("bdwrite: buffer is not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * Wakeup the buffer flushing daemon if we have a lot of dirty
	 * buffers (midpoint between our recovery point and our stall
	 * point).
	 */
	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}
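
/*
 * The common delayed-write pattern (a sketch, not code from this file) is
 * getblk() or bread() to obtain the buffer, modify b_data, then bdwrite()
 * to mark it dirty and release it; the update daemon or buf_daemon writes
 * it out later.
 */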

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		--numdirtybuffers;
		numdirtywakeup(lodirtybuffers);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) BUF_WRITE(bp);
}

/*
 *	bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */

void
bwillwrite(void)
{
	if (numdirtybuffers >= hidirtybuffers) {
		int s;

		mtx_lock(&Giant);
		s = splbio();
		while (numdirtybuffers >= hidirtybuffers) {
			bd_wakeup(1);
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
		}
		splx(s);
		mtx_unlock(&Giant);
	}
}

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{
	return(numdirtybuffers >= hidirtybuffers);
}

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf * bp)
{
	int s;

	GIANT_REQUIRED;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	s = splbio();

	if (bp->b_flags & B_LOCKED)
		bp->b_ioflags &= ~BIO_ERROR;

	if (bp->b_iocmd == BIO_WRITE &&
	    (bp->b_ioflags & BIO_ERROR) &&
	    !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If B_INVAL is set then
		 * this case is not run and the next case is run to
		 * destroy the buffer.  B_INVAL can occur if the buffer
		 * is outside the range supported by the underlying device.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) ||
	    bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			numdirtywakeup(lodirtybuffers);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (vm_page_count_severe() && !(bp->b_xflags & BX_BKGRDINPROG))
		bp->b_flags |= B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer. If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 !vn_isdisk(bp->b_vp, NULL) &&
		 (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then the b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
			if (m == bogus_page) {
				VOP_GETVOBJECT(vp, &obj);
				poff = OFF_TO_IDX(bp->b_offset);
				had_bogus = 1;

				for (j = i; j < bp->b_npages; j++) {
					vm_page_t mtmp;
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = mtmp;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
				m = bp->b_pages[i];
			}
			if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
				if (had_bogus)
					printf("avoided corruption bug in bogus_page/brelse code\n");
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);
		}

	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}

	/* enqueue */

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
		} else {
			bp->b_qindex = QUEUE_EMPTY;
		}
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* remaining buffers */
	} else {
		if (bp->b_flags & B_DELWRI)
			bp->b_qindex = QUEUE_DIRTY;
		else
			bp->b_qindex = QUEUE_CLEAN;
		if (bp->b_flags & B_AGE)
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		else
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}

	/*
	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
	 * on the correct queue.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
		bp->b_flags &= ~B_DELWRI;
		--numdirtybuffers;
		numdirtywakeup(lodirtybuffers);
	}

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF |
			B_DIRECT | B_NOWDRAIN);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	splx(s);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		bp->b_ioflags &= ~BIO_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_DELWRI) {
		bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
	} else if (vm_page_count_severe()) {
		/*
		 * We are too low on memory, we have to try to free the
		 * buffer (most importantly: the wired pages making up its
		 * backing store) *now*.
		 */
		splx(s);
		brelse(bp);
		return;
	} else {
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	}

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	splx(s);
}

/* Give pages used by the bp back to the VM system (where possible) */
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	GIANT_REQUIRED;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (vm_page_count_severe()) {
				vm_page_try_to_cache(m);
			}
		}
	}
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_npages = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = bufhash(vp, blkno);

	/* Search hash chain */
	LIST_FOREACH(bp, bh, b_hash) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
	}
	return (bp);
}

/*
 *	vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	int j;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;
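		/*
		 * For example, with the usual 128K MAXPHYS and a 16K
		 * filesystem block size, maxcl works out to 8 blocks
		 * per cluster.
		 */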

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
			if ((bpa = gbincore(vp, lblkno - j)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		--j;
		ncl = i + j;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
			splx(s);
			return nwritten;
		}
	}

	BUF_LOCK(bp, LK_EXCLUSIVE);
	bremfree(bp);
	bp->b_flags |= B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) BUF_WRITE(bp);

	return nwritten;
}
1610
1611/*
1612 *	getnewbuf:
1613 *
1614 *	Find and initialize a new buffer header, freeing up existing buffers
1615 *	in the bufqueues as necessary.  The new buffer is returned locked.
1616 *
1617 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1618 *	buffer away, the caller must set B_INVAL prior to calling brelse().
1619 *
1620 *	We block if:
1621 *		We have insufficient buffer headers
1622 *		We have insufficient buffer space
1623 *		buffer_map is too fragmented ( space reservation fails )
1624 *		If we have to flush dirty buffers ( but we try to avoid this )
1625 *
1626 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1627 *	Instead we ask the buf daemon to do it for us.  We attempt to
1628 *	avoid piecemeal wakeups of the pageout daemon.
1629 */
1630
1631static struct buf *
1632getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
1633{
1634	struct buf *bp;
1635	struct buf *nbp;
1636	int defrag = 0;
1637	int nqindex;
1638	static int flushingbufs;
1639
1640	GIANT_REQUIRED;
1641
1642	/*
1643	 * We can't afford to block since we might be holding a vnode lock,
1644	 * which may prevent system daemons from running.  We deal with
1645	 * low-memory situations by proactively returning memory and running
1646	 * async I/O rather then sync I/O.
1647	 */
1648
1649	++getnewbufcalls;
1650	--getnewbufrestarts;
1651restart:
1652	++getnewbufrestarts;
1653
1654	/*
1655	 * Setup for scan.  If we do not have enough free buffers,
1656	 * we setup a degenerate case that immediately fails.  Note
1657	 * that if we are specially marked process, we are allowed to
1658	 * dip into our reserves.
1659	 *
1660	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
1661	 *
1662	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
1663	 * However, there are a number of cases (defragging, reusing, ...)
1664	 * where we cannot backup.
1665	 */
1666	nqindex = QUEUE_EMPTYKVA;
1667	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1668
1669	if (nbp == NULL) {
1670		/*
1671		 * If no EMPTYKVA buffers and we are either
1672		 * defragging or reusing, locate a CLEAN buffer
1673		 * to free or reuse.  If bufspace useage is low
1674		 * skip this step so we can allocate a new buffer.
1675		 */
1676		if (defrag || bufspace >= lobufspace) {
1677			nqindex = QUEUE_CLEAN;
1678			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1679		}
1680
1681		/*
1682		 * If we could not find or were not allowed to reuse a
1683		 * CLEAN buffer, check to see if it is ok to use an EMPTY
1684		 * buffer.  We can only use an EMPTY buffer if allocating
1685		 * its KVA would not otherwise run us out of buffer space.
1686		 */
1687		if (nbp == NULL && defrag == 0 &&
1688		    bufspace + maxsize < hibufspace) {
1689			nqindex = QUEUE_EMPTY;
1690			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1691		}
1692	}
1693
1694	/*
1695	 * Run scan, possibly freeing data and/or kva mappings on the fly
1696	 * depending.
1697	 */
1698
1699	while ((bp = nbp) != NULL) {
1700		int qindex = nqindex;
1701
1702		/*
1703		 * Calculate next bp ( we can only use it if we do not block
1704		 * or do other fancy things ).
1705		 */
1706		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1707			switch(qindex) {
1708			case QUEUE_EMPTY:
1709				nqindex = QUEUE_EMPTYKVA;
1710				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1711					break;
1712				/* fall through */
1713			case QUEUE_EMPTYKVA:
1714				nqindex = QUEUE_CLEAN;
1715				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1716					break;
1717				/* fall through */
1718			case QUEUE_CLEAN:
1719				/*
1720				 * nbp is NULL.
1721				 */
1722				break;
1723			}
1724		}
1725
1726		/*
1727		 * Sanity Checks
1728		 */
1729		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
1730
1731		/*
1732		 * Note: we no longer distinguish between VMIO and non-VMIO
1733		 * buffers.
1734		 */
1735
1736		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1737
1738		/*
1739		 * If we are defragging then we need a buffer with
1740		 * b_kvasize != 0.  XXX this situation should no longer
1741		 * occur, if defrag is non-zero the buffer's b_kvasize
1742		 * should also be non-zero at this point.  XXX
1743		 */
1744		if (defrag && bp->b_kvasize == 0) {
1745			printf("Warning: defrag empty buffer %p\n", bp);
1746			continue;
1747		}
1748
1749		/*
1750		 * Start freeing the bp.  This is somewhat involved.  nbp
1751		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1752		 */
1753
1754		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
1755			panic("getnewbuf: locked buf");
1756		bremfree(bp);
1757
1758		if (qindex == QUEUE_CLEAN) {
1759			if (bp->b_flags & B_VMIO) {
1760				bp->b_flags &= ~B_ASYNC;
1761				vfs_vmio_release(bp);
1762			}
1763			if (bp->b_vp)
1764				brelvp(bp);
1765		}
1766
1767		/*
1768		 * NOTE:  nbp is now entirely invalid.  We can only restart
1769		 * the scan from this point on.
1770		 *
1771		 * Get the rest of the buffer freed up.  b_kva* is still
1772		 * valid after this operation.
1773		 */
1774
1775		if (bp->b_rcred != NOCRED) {
1776			crfree(bp->b_rcred);
1777			bp->b_rcred = NOCRED;
1778		}
1779		if (bp->b_wcred != NOCRED) {
1780			crfree(bp->b_wcred);
1781			bp->b_wcred = NOCRED;
1782		}
1783		if (LIST_FIRST(&bp->b_dep) != NULL)
1784			buf_deallocate(bp);
1785		if (bp->b_xflags & BX_BKGRDINPROG)
1786			panic("losing buffer 3");
1787		LIST_REMOVE(bp, b_hash);
1788		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1789
1790		if (bp->b_bufsize)
1791			allocbuf(bp, 0);
1792
1793		bp->b_flags = 0;
1794		bp->b_ioflags = 0;
1795		bp->b_xflags = 0;
1796		bp->b_dev = NODEV;
1797		bp->b_vp = NULL;
1798		bp->b_blkno = bp->b_lblkno = 0;
1799		bp->b_offset = NOOFFSET;
1800		bp->b_iodone = 0;
1801		bp->b_error = 0;
1802		bp->b_resid = 0;
1803		bp->b_bcount = 0;
1804		bp->b_npages = 0;
1805		bp->b_dirtyoff = bp->b_dirtyend = 0;
1806		bp->b_magic = B_MAGIC_BIO;
1807		bp->b_op = &buf_ops_bio;
1808
1809		LIST_INIT(&bp->b_dep);
1810
1811		/*
1812		 * If we are defragging then free the buffer.
1813		 */
1814		if (defrag) {
1815			bp->b_flags |= B_INVAL;
1816			bfreekva(bp);
1817			brelse(bp);
1818			defrag = 0;
1819			goto restart;
1820		}
1821
1822		/*
1823		 * If we are overcommitted then recover the buffer and its
1824		 * KVM space.  This occurs in rare situations when multiple
1825		 * processes are blocked in getnewbuf() or allocbuf().
1826		 */
1827		if (bufspace >= hibufspace)
1828			flushingbufs = 1;
1829		if (flushingbufs && bp->b_kvasize != 0) {
1830			bp->b_flags |= B_INVAL;
1831			bfreekva(bp);
1832			brelse(bp);
1833			goto restart;
1834		}
1835		if (bufspace < lobufspace)
1836			flushingbufs = 0;
1837		break;
1838	}
1839
1840	/*
1841	 * If we exhausted our list, sleep as appropriate.  We may have to
1842	 * wakeup various daemons and write out some dirty buffers.
1843	 *
1844	 * Generally we are sleeping due to insufficient buffer space.
1845	 */
1846
1847	if (bp == NULL) {
1848		int flags;
1849		char *waitmsg;
1850
1851		if (defrag) {
1852			flags = VFS_BIO_NEED_BUFSPACE;
1853			waitmsg = "nbufkv";
1854		} else if (bufspace >= hibufspace) {
1855			waitmsg = "nbufbs";
1856			flags = VFS_BIO_NEED_BUFSPACE;
1857		} else {
1858			waitmsg = "newbuf";
1859			flags = VFS_BIO_NEED_ANY;
1860		}
1861
1862		bd_speedup();	/* heeeelp */
1863
1864		needsbuffer |= flags;
1865		while (needsbuffer & flags) {
1866			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1867			    waitmsg, slptimeo))
1868				return (NULL);
1869		}
1870	} else {
1871		/*
1872		 * We finally have a valid bp.  We aren't quite out of the
1873		 * woods; we still have to reserve kva space.  In order
1874		 * to keep fragmentation sane we only allocate kva in
1875		 * BKVASIZE chunks.
1876		 */
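		/*
		 * A worked example of the rounding below (the 16K figure is
		 * only an assumption for illustration, BKVASIZE is machine
		 * dependent): with BKVASIZE = 16384 and BKVAMASK = 16383, a
		 * request of maxsize = 20000 becomes
		 * (20000 + 16383) & ~16383 = 32768, i.e. the next multiple
		 * of BKVASIZE.
		 */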
1877		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
1878
1879		if (maxsize != bp->b_kvasize) {
1880			vm_offset_t addr = 0;
1881
1882			bfreekva(bp);
1883
1884			if (vm_map_findspace(buffer_map,
1885				vm_map_min(buffer_map), maxsize, &addr)) {
1886				/*
1887				 * Uh oh.  Buffer map is too fragmented.  We
1888				 * must defragment the map.
1889				 */
1890				++bufdefragcnt;
1891				defrag = 1;
1892				bp->b_flags |= B_INVAL;
1893				brelse(bp);
1894				goto restart;
1895			}
1896			if (addr) {
1897				vm_map_insert(buffer_map, NULL, 0,
1898					addr, addr + maxsize,
1899					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1900
1901				bp->b_kvabase = (caddr_t) addr;
1902				bp->b_kvasize = maxsize;
1903				bufspace += bp->b_kvasize;
1904				++bufreusecnt;
1905			}
1906		}
1907		bp->b_data = bp->b_kvabase;
1908	}
1909	return(bp);
1910}
1911
1912/*
1913 *	buf_daemon:
1914 *
1915 *	buffer flushing daemon.  Buffers are normally flushed by the
1916 *	update daemon but if it cannot keep up this process starts to
1917 *	take the load in an attempt to prevent getnewbuf() from blocking.
1918 */
1919
1920static struct proc *bufdaemonproc;
1921
1922static struct kproc_desc buf_kp = {
1923	"bufdaemon",
1924	buf_daemon,
1925	&bufdaemonproc
1926};
1927SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
1928
1929static void
1930buf_daemon()
1931{
1932	int s;
1933
1934	mtx_lock(&Giant);
1935
1936	/*
1937	 * This process needs to be suspended prior to shutdown sync.
1938	 */
1939	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
1940	    SHUTDOWN_PRI_LAST);
1941
1942	/*
1943	 * This process is allowed to take the buffer cache to the limit
1944	 */
1945	s = splbio();
1946
1947	for (;;) {
1948		kthread_suspend_check(bufdaemonproc);
1949
1950		bd_request = 0;
1951
1952		/*
1953		 * Do the flush.  Limit the amount of in-transit I/O we
1954		 * allow to build up, otherwise we would completely saturate
1955		 * the I/O system.  Wakeup any waiting processes before we
1956		 * normally would so they can run in parallel with our drain.
1957		 */
1958		while (numdirtybuffers > lodirtybuffers) {
1959			if (flushbufqueues() == 0)
1960				break;
1961			waitrunningbufspace();
1962			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
1963		}
1964
1965		/*
1966		 * Only clear bd_request if we have reached our low water
1967		 * mark.  The buf_daemon normally waits 1 second and
1968		 * then incrementally flushes any dirty buffers that have
1969		 * built up, within reason.
1970		 *
1971		 * If we were unable to hit our low water mark and couldn't
1972		 * find any flushable buffers, we sleep half a second.
1973		 * Otherwise we loop immediately.
1974		 */
1975		if (numdirtybuffers <= lodirtybuffers) {
1976			/*
1977			 * We reached our low water mark, reset the
1978			 * request and sleep until we are needed again.
1979			 * The sleep is just so the suspend code works.
1980			 */
1981			bd_request = 0;
1982			tsleep(&bd_request, PVM, "psleep", hz);
1983		} else {
1984			/*
1985			 * We couldn't find any flushable dirty buffers but
1986			 * still have too many dirty buffers, so we have to
1987			 * sleep and try again.  (rare)
1988			 */
1989			tsleep(&bd_request, PVM, "qsleep", hz / 2);
1990		}
1991	}
1992}
1993
1994/*
1995 *	flushbufqueues:
1996 *
1997 *	Try to flush a buffer in the dirty queue.  We must be careful to
1998 *	free up B_INVAL buffers instead of writing them, which NFS is
1999 *	particularly sensitive to.
2000 */
2001
2002static int
2003flushbufqueues(void)
2004{
2005	struct buf *bp;
2006	int r = 0;
2007
2008	bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
2009
2010	while (bp) {
2011		KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp));
2012		if ((bp->b_flags & B_DELWRI) != 0 &&
2013		    (bp->b_xflags & BX_BKGRDINPROG) == 0) {
2014			if (bp->b_flags & B_INVAL) {
2015				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
2016					panic("flushbufqueues: locked buf");
2017				bremfree(bp);
2018				brelse(bp);
2019				++r;
2020				break;
2021			}
2022			if (LIST_FIRST(&bp->b_dep) != NULL &&
2023			    (bp->b_flags & B_DEFERRED) == 0 &&
2024			    buf_countdeps(bp, 0)) {
2025				TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY],
2026				    bp, b_freelist);
2027				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY],
2028				    bp, b_freelist);
2029				bp->b_flags |= B_DEFERRED;
2030				bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
2031				continue;
2032			}
2033			vfs_bio_awrite(bp);
2034			++r;
2035			break;
2036		}
2037		bp = TAILQ_NEXT(bp, b_freelist);
2038	}
2039	return (r);
2040}
2041
2042/*
2043 * Check to see if a block is currently memory resident.
2044 */
2045struct buf *
2046incore(struct vnode * vp, daddr_t blkno)
2047{
2048	struct buf *bp;
2049
2050	int s = splbio();
2051	bp = gbincore(vp, blkno);
2052	splx(s);
2053	return (bp);
2054}
2055
2056/*
2057 * Returns true if no I/O is needed to access the
2058 * associated VM object.  This is like incore except
2059 * it also hunts around in the VM system for the data.
2060 */
2061
2062int
2063inmem(struct vnode * vp, daddr_t blkno)
2064{
2065	vm_object_t obj;
2066	vm_offset_t toff, tinc, size;
2067	vm_page_t m;
2068	vm_ooffset_t off;
2069
2070	GIANT_REQUIRED;
2071
2072	if (incore(vp, blkno))
2073		return 1;
2074	if (vp->v_mount == NULL)
2075		return 0;
2076	if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
2077		return 0;
2078
2079	size = PAGE_SIZE;
2080	if (size > vp->v_mount->mnt_stat.f_iosize)
2081		size = vp->v_mount->mnt_stat.f_iosize;
2082	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2083
2084	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2085		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2086		if (!m)
2087			goto notinmem;
2088		tinc = size;
2089		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2090			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2091		if (vm_page_is_valid(m,
2092		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2093			goto notinmem;
2094	}
2095	return 1;
2096
2097notinmem:
2098	return (0);
2099}
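
/*
 * An illustrative sketch (not compiled in) of how a caller might use
 * inmem() to decide whether a read-ahead block needs any I/O at all.
 * The function name and its caller are hypothetical; inmem() already
 * checks incore() internally, so a single call suffices.
 */
#if 0
static int
readahead_needed(struct vnode *vp, daddr_t rablkno)
{

	if (inmem(vp, rablkno))
		return (0);	/* data already resident, no I/O required */
	return (1);
}
#endif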
2100
2101/*
2102 *	vfs_setdirty:
2103 *
2104 *	Sets the dirty range for a buffer based on the status of the dirty
2105 *	bits in the pages comprising the buffer.
2106 *
2107 *	The range is limited to the size of the buffer.
2108 *
2109 *	This routine is primarily used by NFS, but is generalized for the
2110 *	B_VMIO case.
2111 */
2112static void
2113vfs_setdirty(struct buf *bp)
2114{
2115	int i;
2116	vm_object_t object;
2117
2118	GIANT_REQUIRED;
2119	/*
2120	 * Degenerate case - empty buffer
2121	 */
2122
2123	if (bp->b_bufsize == 0)
2124		return;
2125
2126	/*
2127	 * We qualify the scan for modified pages on whether the
2128	 * object has been flushed yet.  The OBJ_WRITEABLE flag
2129	 * is not cleared simply by protecting pages off.
2130	 */
2131
2132	if ((bp->b_flags & B_VMIO) == 0)
2133		return;
2134
2135	object = bp->b_pages[0]->object;
2136
2137	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
2138		printf("Warning: object %p writeable but not mightbedirty\n", object);
2139	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
2140		printf("Warning: object %p mightbedirty but not writeable\n", object);
2141
2142	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
2143		vm_offset_t boffset;
2144		vm_offset_t eoffset;
2145
2146		/*
2147		 * test the pages to see if they have been modified directly
2148		 * by users through the VM system.
2149		 */
2150		for (i = 0; i < bp->b_npages; i++) {
2151			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2152			vm_page_test_dirty(bp->b_pages[i]);
2153		}
2154
2155		/*
2156		 * Calculate the encompassing dirty range, boffset and eoffset,
2157		 * (eoffset - boffset) bytes.
2158		 */
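		/*
		 * For example (a 4096 byte page size is assumed only for
		 * illustration): for a page-aligned buffer spanning three
		 * pages of which only the middle one is dirty, the forward
		 * scan stops at i = 1 giving boffset = 4096 and the backward
		 * scan stops at i = 1 giving eoffset = 8192, so the dirty
		 * range is the middle 4096 bytes before clipping to b_bcount.
		 */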
2159
2160		for (i = 0; i < bp->b_npages; i++) {
2161			if (bp->b_pages[i]->dirty)
2162				break;
2163		}
2164		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2165
2166		for (i = bp->b_npages - 1; i >= 0; --i) {
2167			if (bp->b_pages[i]->dirty) {
2168				break;
2169			}
2170		}
2171		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2172
2173		/*
2174		 * Fit it to the buffer.
2175		 */
2176
2177		if (eoffset > bp->b_bcount)
2178			eoffset = bp->b_bcount;
2179
2180		/*
2181		 * If we have a good dirty range, merge with the existing
2182		 * dirty range.
2183		 */
2184
2185		if (boffset < eoffset) {
2186			if (bp->b_dirtyoff > boffset)
2187				bp->b_dirtyoff = boffset;
2188			if (bp->b_dirtyend < eoffset)
2189				bp->b_dirtyend = eoffset;
2190		}
2191	}
2192}
2193
2194/*
2195 *	getblk:
2196 *
2197 *	Get a block given a specified block and offset into a file/device.
2198 *	The buffer's B_DONE bit will be cleared on return, making it almost
2199 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2200 *	return.  The caller should clear B_INVAL prior to initiating a
2201 *	READ.
2202 *
2203 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2204 *	an existing buffer.
2205 *
2206 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2207 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2208 *	and then cleared based on the backing VM.  If the previous buffer is
2209 *	non-0-sized but invalid, B_CACHE will be cleared.
2210 *
2211 *	If getblk() must create a new buffer, the new buffer is returned with
2212 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2213 *	case it is returned with B_INVAL clear and B_CACHE set based on the
2214 *	backing VM.
2215 *
2216 *	getblk() also forces a BUF_WRITE() for any B_DELWRI buffer whose
2217 *	B_CACHE bit is clear.
2218 *
2219 *	What this means, basically, is that the caller should use B_CACHE to
2220 *	determine whether the buffer is fully valid or not and should clear
2221 *	B_INVAL prior to issuing a read.  If the caller intends to validate
2222 *	the buffer by loading its data area with something, the caller needs
2223 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2224 *	the caller should set B_CACHE ( as an optimization ), else the caller
2225 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2226 *	a write attempt or if it was a successful read.  If the caller
2227 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2228 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2229 */
2230struct buf *
2231getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
2232{
2233	struct buf *bp;
2234	int s;
2235	struct bufhashhdr *bh;
2236
2237	if (size > MAXBSIZE)
2238		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2239
2240	s = splbio();
2241loop:
2242	/*
2243	 * Block if we are low on buffers.  Certain processes are allowed
2244	 * to completely exhaust the buffer cache.
2245	 *
2246	 * If this check ever becomes a bottleneck it may be better to
2247	 * move it into the else, when gbincore() fails.  At the moment
2248	 * it isn't a problem.
2249	 *
2250	 * XXX remove if 0 sections (clean this up after it's proven)
2251	 */
2252	if (numfreebuffers == 0) {
2253		if (curthread == PCPU_GET(idlethread))
2254			return NULL;
2255		needsbuffer |= VFS_BIO_NEED_ANY;
2256	}
2257
2258	if ((bp = gbincore(vp, blkno))) {
2259		/*
2260		 * Buffer is in-core.  If the buffer is not busy, it must
2261		 * be on a queue.
2262		 */
2263
2264		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2265			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2266			    "getblk", slpflag, slptimeo) == ENOLCK)
2267				goto loop;
2268			splx(s);
2269			return (struct buf *) NULL;
2270		}
2271
2272		/*
2273		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2274		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2275		 * and for a VMIO buffer B_CACHE is adjusted according to the
2276		 * backing VM cache.
2277		 */
2278		if (bp->b_flags & B_INVAL)
2279			bp->b_flags &= ~B_CACHE;
2280		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2281			bp->b_flags |= B_CACHE;
2282		bremfree(bp);
2283
2284		/*
2285		 * check for size inconsistencies for the non-VMIO case.
2286		 */
2287
2288		if (bp->b_bcount != size) {
2289			if ((bp->b_flags & B_VMIO) == 0 ||
2290			    (size > bp->b_kvasize)) {
2291				if (bp->b_flags & B_DELWRI) {
2292					bp->b_flags |= B_NOCACHE;
2293					BUF_WRITE(bp);
2294				} else {
2295					if ((bp->b_flags & B_VMIO) &&
2296					   (LIST_FIRST(&bp->b_dep) == NULL)) {
2297						bp->b_flags |= B_RELBUF;
2298						brelse(bp);
2299					} else {
2300						bp->b_flags |= B_NOCACHE;
2301						BUF_WRITE(bp);
2302					}
2303				}
2304				goto loop;
2305			}
2306		}
2307
2308		/*
2309		 * If the size is inconsistent in the VMIO case, we can resize
2310		 * the buffer.  This might lead to B_CACHE getting set or
2311		 * cleared.  If the size has not changed, B_CACHE remains
2312		 * unchanged from its previous state.
2313		 */
2314
2315		if (bp->b_bcount != size)
2316			allocbuf(bp, size);
2317
2318		KASSERT(bp->b_offset != NOOFFSET,
2319		    ("getblk: no buffer offset"));
2320
2321		/*
2322		 * A buffer with B_DELWRI set and B_CACHE clear must
2323		 * be committed before we can return the buffer in
2324		 * order to prevent the caller from issuing a read
2325		 * ( due to B_CACHE not being set ) and overwriting
2326		 * it.
2327		 *
2328		 * Most callers, including NFS and FFS, need this to
2329		 * operate properly either because they assume they
2330		 * can issue a read if B_CACHE is not set, or because
2331		 * ( for example ) an uncached B_DELWRI might loop due
2332		 * to softupdates re-dirtying the buffer.  In the latter
2333		 * case, B_CACHE is set after the first write completes,
2334		 * preventing further loops.
2335		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2336		 * above while extending the buffer, we cannot allow the
2337		 * buffer to remain with B_CACHE set after the write
2338		 * completes or it will represent a corrupt state.  To
2339		 * deal with this we set B_NOCACHE to scrap the buffer
2340		 * after the write.
2341		 *
2342		 * We might be able to do something fancy, like setting
2343		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2344		 * so the below call doesn't set B_CACHE, but that gets real
2345		 * confusing.  This is much easier.
2346		 */
2347
2348		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2349			bp->b_flags |= B_NOCACHE;
2350			BUF_WRITE(bp);
2351			goto loop;
2352		}
2353
2354		splx(s);
2355		bp->b_flags &= ~B_DONE;
2356	} else {
2357		/*
2358		 * Buffer is not in-core, create new buffer.  The buffer
2359		 * returned by getnewbuf() is locked.  Note that the returned
2360		 * buffer is also considered valid (not marked B_INVAL).
2361		 */
2362		int bsize, maxsize, vmio;
2363		off_t offset;
2364
2365		if (vn_isdisk(vp, NULL))
2366			bsize = DEV_BSIZE;
2367		else if (vp->v_mountedhere)
2368			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
2369		else if (vp->v_mount)
2370			bsize = vp->v_mount->mnt_stat.f_iosize;
2371		else
2372			bsize = size;
2373
2374		offset = (off_t)blkno * bsize;
2375		vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
2376		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2377		maxsize = imax(maxsize, bsize);
2378
2379		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
2380			if (slpflag || slptimeo) {
2381				splx(s);
2382				return NULL;
2383			}
2384			goto loop;
2385		}
2386
2387		/*
2388		 * This code is used to make sure that a buffer is not
2389		 * created while the getnewbuf routine is blocked.
2390		 * This can be a problem whether the vnode is locked or not.
2391		 * If the buffer is created out from under us, we have to
2392		 * throw away the one we just created.  There is no window
2393		 * race because we are safely running at splbio() from the
2394		 * point of the duplicate buffer creation through to here,
2395		 * and we've locked the buffer.
2396		 */
2397		if (gbincore(vp, blkno)) {
2398			bp->b_flags |= B_INVAL;
2399			brelse(bp);
2400			goto loop;
2401		}
2402
2403		/*
2404		 * Insert the buffer into the hash, so that it can
2405		 * be found by incore.
2406		 */
2407		bp->b_blkno = bp->b_lblkno = blkno;
2408		bp->b_offset = offset;
2409
2410		bgetvp(vp, bp);
2411		LIST_REMOVE(bp, b_hash);
2412		bh = bufhash(vp, blkno);
2413		LIST_INSERT_HEAD(bh, bp, b_hash);
2414
2415		/*
2416		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2417		 * buffer size starts out as 0, B_CACHE will be set by
2418		 * allocbuf() for the VMIO case prior to it testing the
2419		 * backing store for validity.
2420		 */
2421
2422		if (vmio) {
2423			bp->b_flags |= B_VMIO;
2424#if defined(VFS_BIO_DEBUG)
2425			if (vp->v_type != VREG)
2426				printf("getblk: vmioing file type %d???\n", vp->v_type);
2427#endif
2428		} else {
2429			bp->b_flags &= ~B_VMIO;
2430		}
2431
2432		allocbuf(bp, size);
2433
2434		splx(s);
2435		bp->b_flags &= ~B_DONE;
2436	}
2437	return (bp);
2438}
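
/*
 * A minimal sketch (not compiled in) of the B_CACHE protocol documented
 * above getblk().  The function name is hypothetical and error handling is
 * reduced to the essentials; going through VOP_STRATEGY() is shown only as
 * one possible way to initiate the read.
 */
#if 0
static int
getblk_read_sketch(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
	struct buf *bp;
	int error;

	bp = getblk(vp, blkno, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* Not fully valid: clear B_INVAL/BIO_ERROR and read it in. */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		error = bufwait(bp);
		if (error) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}
#endif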
2439
2440/*
2441 * Get an empty, disassociated buffer of given size.  The buffer is initially
2442 * set to B_INVAL.
2443 */
2444struct buf *
2445geteblk(int size)
2446{
2447	struct buf *bp;
2448	int s;
2449	int maxsize;
2450
2451	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2452
2453	s = splbio();
2454	while ((bp = getnewbuf(0, 0, size, maxsize)) == 0);
2455	splx(s);
2456	allocbuf(bp, size);
2457	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2458	return (bp);
2459}
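
/*
 * An illustrative sketch (not compiled in): geteblk() is typically used for
 * anonymous scratch buffers with no vnode association.  The size and the
 * eventual consumer of the data area here are hypothetical.
 */
#if 0
static void
scratch_buffer_sketch(void)
{
	struct buf *bp;

	bp = geteblk(8192);
	bzero(bp->b_data, bp->b_bcount);
	/* ... fill bp->b_data and hand it off ... */
	brelse(bp);		/* B_INVAL lets the buffer be reclaimed at once */
}
#endif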
2460
2461
2462/*
2463 * This code builds the buffer memory from either anonymous system
2464 * memory (in the case of non-VMIO operations) or from an associated
2465 * VM object (in the case of VMIO operations).  This code is able to
2466 * resize a buffer up or down.
2467 *
2468 * Note that this code is tricky, and has many complications to resolve
2469 * deadlock or inconsistent data situations.  Tread lightly!!!
2470 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2471 * the caller.  Calling this code willy nilly can result in the loss of data.
2472 *
2473 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2474 * B_CACHE for the non-VMIO case.
2475 */
2476
2477int
2478allocbuf(struct buf *bp, int size)
2479{
2480	int newbsize, mbsize;
2481	int i;
2482
2483	GIANT_REQUIRED;
2484
2485	if (BUF_REFCNT(bp) == 0)
2486		panic("allocbuf: buffer not busy");
2487
2488	if (bp->b_kvasize < size)
2489		panic("allocbuf: buffer too small");
2490
2491	if ((bp->b_flags & B_VMIO) == 0) {
2492		caddr_t origbuf;
2493		int origbufsize;
2494		/*
2495		 * Just get anonymous memory from the kernel.  Don't
2496		 * mess with B_CACHE.
2497		 */
2498		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2499#if !defined(NO_B_MALLOC)
2500		if (bp->b_flags & B_MALLOC)
2501			newbsize = mbsize;
2502		else
2503#endif
2504			newbsize = round_page(size);
2505
2506		if (newbsize < bp->b_bufsize) {
2507#if !defined(NO_B_MALLOC)
2508			/*
2509			 * malloced buffers are not shrunk
2510			 */
2511			if (bp->b_flags & B_MALLOC) {
2512				if (newbsize) {
2513					bp->b_bcount = size;
2514				} else {
2515					free(bp->b_data, M_BIOBUF);
2516					if (bp->b_bufsize) {
2517						bufmallocspace -= bp->b_bufsize;
2518						bufspacewakeup();
2519						bp->b_bufsize = 0;
2520					}
2521					bp->b_data = bp->b_kvabase;
2522					bp->b_bcount = 0;
2523					bp->b_flags &= ~B_MALLOC;
2524				}
2525				return 1;
2526			}
2527#endif
2528			vm_hold_free_pages(
2529			    bp,
2530			    (vm_offset_t) bp->b_data + newbsize,
2531			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2532		} else if (newbsize > bp->b_bufsize) {
2533#if !defined(NO_B_MALLOC)
2534			/*
2535			 * We only use malloced memory on the first allocation,
2536			 * and revert to page-allocated memory when the buffer
2537			 * grows.
2538			 */
2539			if ( (bufmallocspace < maxbufmallocspace) &&
2540				(bp->b_bufsize == 0) &&
2541				(mbsize <= PAGE_SIZE/2)) {
2542
2543				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2544				bp->b_bufsize = mbsize;
2545				bp->b_bcount = size;
2546				bp->b_flags |= B_MALLOC;
2547				bufmallocspace += mbsize;
2548				return 1;
2549			}
2550#endif
2551			origbuf = NULL;
2552			origbufsize = 0;
2553#if !defined(NO_B_MALLOC)
2554			/*
2555			 * If the buffer is growing on its other-than-first allocation,
2556			 * then we revert to the page-allocation scheme.
2557			 */
2558			if (bp->b_flags & B_MALLOC) {
2559				origbuf = bp->b_data;
2560				origbufsize = bp->b_bufsize;
2561				bp->b_data = bp->b_kvabase;
2562				if (bp->b_bufsize) {
2563					bufmallocspace -= bp->b_bufsize;
2564					bufspacewakeup();
2565					bp->b_bufsize = 0;
2566				}
2567				bp->b_flags &= ~B_MALLOC;
2568				newbsize = round_page(newbsize);
2569			}
2570#endif
2571			vm_hold_load_pages(
2572			    bp,
2573			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2574			    (vm_offset_t) bp->b_data + newbsize);
2575#if !defined(NO_B_MALLOC)
2576			if (origbuf) {
2577				bcopy(origbuf, bp->b_data, origbufsize);
2578				free(origbuf, M_BIOBUF);
2579			}
2580#endif
2581		}
2582	} else {
2583		vm_page_t m;
2584		int desiredpages;
2585
2586		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2587		desiredpages = (size == 0) ? 0 :
2588			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2589
2590#if !defined(NO_B_MALLOC)
2591		if (bp->b_flags & B_MALLOC)
2592			panic("allocbuf: VMIO buffer can't be malloced");
2593#endif
2594		/*
2595		 * Set B_CACHE initially if buffer is 0 length or will become
2596		 * 0-length.
2597		 */
2598		if (size == 0 || bp->b_bufsize == 0)
2599			bp->b_flags |= B_CACHE;
2600
2601		if (newbsize < bp->b_bufsize) {
2602			/*
2603			 * DEV_BSIZE aligned new buffer size is less than the
2604			 * DEV_BSIZE aligned existing buffer size.  Figure out
2605			 * if we have to remove any pages.
2606			 */
2607			if (desiredpages < bp->b_npages) {
2608				for (i = desiredpages; i < bp->b_npages; i++) {
2609					/*
2610					 * the page is not freed here -- it
2611					 * is the responsibility of
2612					 * vnode_pager_setsize
2613					 */
2614					m = bp->b_pages[i];
2615					KASSERT(m != bogus_page,
2616					    ("allocbuf: bogus page found"));
2617					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2618						;
2619
2620					bp->b_pages[i] = NULL;
2621					vm_page_unwire(m, 0);
2622				}
2623				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2624				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2625				bp->b_npages = desiredpages;
2626			}
2627		} else if (size > bp->b_bcount) {
2628			/*
2629			 * We are growing the buffer, possibly in a
2630			 * byte-granular fashion.
2631			 */
2632			struct vnode *vp;
2633			vm_object_t obj;
2634			vm_offset_t toff;
2635			vm_offset_t tinc;
2636
2637			/*
2638			 * Step 1, bring in the VM pages from the object,
2639			 * allocating them if necessary.  We must clear
2640			 * B_CACHE if these pages are not valid for the
2641			 * range covered by the buffer.
2642			 */
2643
2644			vp = bp->b_vp;
2645			VOP_GETVOBJECT(vp, &obj);
2646
2647			while (bp->b_npages < desiredpages) {
2648				vm_page_t m;
2649				vm_pindex_t pi;
2650
2651				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2652				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2653					/*
2654					 * note: must allocate system pages
2655					 * since blocking here could interfere
2656					 * with paging I/O, no matter which
2657					 * process we are.
2658					 */
2659					m = vm_page_alloc(obj, pi, VM_ALLOC_SYSTEM);
2660					if (m == NULL) {
2661						VM_WAIT;
2662						vm_pageout_deficit += desiredpages - bp->b_npages;
2663					} else {
2664						vm_page_wire(m);
2665						vm_page_wakeup(m);
2666						bp->b_flags &= ~B_CACHE;
2667						bp->b_pages[bp->b_npages] = m;
2668						++bp->b_npages;
2669					}
2670					continue;
2671				}
2672
2673				/*
2674				 * We found a page.  If we have to sleep on it,
2675				 * retry because it might have gotten freed out
2676				 * from under us.
2677				 *
2678				 * We can only test PG_BUSY here.  Blocking on
2679				 * m->busy might lead to a deadlock:
2680				 *
2681				 *  vm_fault->getpages->cluster_read->allocbuf
2682				 *
2683				 */
2684
2685				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2686					continue;
2687
2688				/*
2689				 * We have a good page.  Should we wakeup the
2690				 * page daemon?
2691				 */
2692				if ((curproc != pageproc) &&
2693				    ((m->queue - m->pc) == PQ_CACHE) &&
2694				    ((cnt.v_free_count + cnt.v_cache_count) <
2695					(cnt.v_free_min + cnt.v_cache_min))) {
2696					pagedaemon_wakeup();
2697				}
2698				vm_page_flag_clear(m, PG_ZERO);
2699				vm_page_wire(m);
2700				bp->b_pages[bp->b_npages] = m;
2701				++bp->b_npages;
2702			}
2703
2704			/*
2705			 * Step 2.  We've loaded the pages into the buffer,
2706			 * so we have to figure out if we can still have B_CACHE
2707			 * set.  Note that B_CACHE is set according to the
2708			 * byte-granular range ( bcount and size ), not the
2709			 * aligned range ( newbsize ).
2710			 *
2711			 * The VM test is against m->valid, which is DEV_BSIZE
2712			 * aligned.  Needless to say, the validity of the data
2713			 * needs to also be DEV_BSIZE aligned.  Note that this
2714			 * fails with NFS if the server or some other client
2715			 * extends the file's EOF.  If our buffer is resized,
2716			 * B_CACHE may remain set! XXX
2717			 */
2718
2719			toff = bp->b_bcount;
2720			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2721
2722			while ((bp->b_flags & B_CACHE) && toff < size) {
2723				vm_pindex_t pi;
2724
2725				if (tinc > (size - toff))
2726					tinc = size - toff;
2727
2728				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2729				    PAGE_SHIFT;
2730
2731				vfs_buf_test_cache(
2732				    bp,
2733				    bp->b_offset,
2734				    toff,
2735				    tinc,
2736				    bp->b_pages[pi]
2737				);
2738				toff += tinc;
2739				tinc = PAGE_SIZE;
2740			}
2741
2742			/*
2743			 * Step 3, fixup the KVM pmap.  Remember that
2744			 * bp->b_data is relative to bp->b_offset, but
2745			 * bp->b_offset may be offset into the first page.
2746			 */
2747
2748			bp->b_data = (caddr_t)
2749			    trunc_page((vm_offset_t)bp->b_data);
2750			pmap_qenter(
2751			    (vm_offset_t)bp->b_data,
2752			    bp->b_pages,
2753			    bp->b_npages
2754			);
2755
2756			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2757			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2758		}
2759	}
2760	if (newbsize < bp->b_bufsize)
2761		bufspacewakeup();
2762	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2763	bp->b_bcount = size;		/* requested buffer size	*/
2764	return 1;
2765}
2766
2767/*
2768 *	bufwait:
2769 *
2770 *	Wait for buffer I/O completion, returning error status.  The buffer
2771 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
2772 *	error and cleared.
2773 */
2774int
2775bufwait(register struct buf * bp)
2776{
2777	int s;
2778
2779	s = splbio();
2780	while ((bp->b_flags & B_DONE) == 0) {
2781		if (bp->b_iocmd == BIO_READ)
2782			tsleep(bp, PRIBIO, "biord", 0);
2783		else
2784			tsleep(bp, PRIBIO, "biowr", 0);
2785	}
2786	splx(s);
2787	if (bp->b_flags & B_EINTR) {
2788		bp->b_flags &= ~B_EINTR;
2789		return (EINTR);
2790	}
2791	if (bp->b_ioflags & BIO_ERROR) {
2792		return (bp->b_error ? bp->b_error : EIO);
2793	} else {
2794		return (0);
2795	}
2796}
2797
2798 /*
2799  * Call back function from struct bio back up to struct buf.
2800  * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
2801  */
2802void
2803bufdonebio(struct bio *bp)
2804{
2805	bufdone(bp->bio_caller2);
2806}
2807
2808/*
2809 *	bufdone:
2810 *
2811 *	Finish I/O on a buffer, optionally calling a completion function.
2812 *	This is usually called from an interrupt so process blocking is
2813 *	not allowed.
2814 *
2815 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2816 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2817 *	assuming B_INVAL is clear.
2818 *
2819 *	For the VMIO case, we set B_CACHE if the op was a read and no
2820 *	read error occurred, or if the op was a write.  B_CACHE is never
2821 *	set if the buffer is invalid or otherwise uncacheable.
2822 *
2823 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2824 *	initiator to leave B_INVAL set to brelse the buffer out of existence
2825 *	in the biodone routine.
2826 */
2827void
2828bufdone(struct buf *bp)
2829{
2830	int s, error;
2831	void    (*biodone) __P((struct buf *));
2832
2833	GIANT_REQUIRED;
2834
2835	s = splbio();
2836
2837	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
2838	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2839
2840	bp->b_flags |= B_DONE;
2841	runningbufwakeup(bp);
2842
2843	if (bp->b_iocmd == BIO_DELETE) {
2844		brelse(bp);
2845		splx(s);
2846		return;
2847	}
2848
2849	if (bp->b_iocmd == BIO_WRITE) {
2850		vwakeup(bp);
2851	}
2852
2853	/* call optional completion function if requested */
2854	if (bp->b_iodone != NULL) {
2855		biodone = bp->b_iodone;
2856		bp->b_iodone = NULL;
2857		(*biodone) (bp);
2858		splx(s);
2859		return;
2860	}
2861	if (LIST_FIRST(&bp->b_dep) != NULL)
2862		buf_complete(bp);
2863
2864	if (bp->b_flags & B_VMIO) {
2865		int i;
2866		vm_ooffset_t foff;
2867		vm_page_t m;
2868		vm_object_t obj;
2869		int iosize;
2870		struct vnode *vp = bp->b_vp;
2871
2872		error = VOP_GETVOBJECT(vp, &obj);
2873
2874#if defined(VFS_BIO_DEBUG)
2875		if (vp->v_usecount == 0) {
2876			panic("biodone: zero vnode ref count");
2877		}
2878
2879		if (error) {
2880			panic("biodone: missing VM object");
2881		}
2882
2883		if ((vp->v_flag & VOBJBUF) == 0) {
2884			panic("biodone: vnode is not setup for merged cache");
2885		}
2886#endif
2887
2888		foff = bp->b_offset;
2889		KASSERT(bp->b_offset != NOOFFSET,
2890		    ("biodone: no buffer offset"));
2891
2892		if (error) {
2893			panic("biodone: no object");
2894		}
2895#if defined(VFS_BIO_DEBUG)
2896		if (obj->paging_in_progress < bp->b_npages) {
2897			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2898			    obj->paging_in_progress, bp->b_npages);
2899		}
2900#endif
2901
2902		/*
2903		 * Set B_CACHE if the op was a normal read and no error
2904		 * occurred.  B_CACHE is set for writes in the b*write()
2905		 * routines.
2906		 */
2907		iosize = bp->b_bcount - bp->b_resid;
2908		if (bp->b_iocmd == BIO_READ &&
2909		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
2910		    !(bp->b_ioflags & BIO_ERROR)) {
2911			bp->b_flags |= B_CACHE;
2912		}
2913
2914		for (i = 0; i < bp->b_npages; i++) {
2915			int bogusflag = 0;
2916			int resid;
2917
2918			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2919			if (resid > iosize)
2920				resid = iosize;
2921
2922			/*
2923			 * cleanup bogus pages, restoring the originals
2924			 */
2925			m = bp->b_pages[i];
2926			if (m == bogus_page) {
2927				bogusflag = 1;
2928				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2929				if (m == NULL)
2930					panic("biodone: page disappeared!");
2931				bp->b_pages[i] = m;
2932				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2933			}
2934#if defined(VFS_BIO_DEBUG)
2935			if (OFF_TO_IDX(foff) != m->pindex) {
2936				printf(
2937"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2938				    (unsigned long)foff, m->pindex);
2939			}
2940#endif
2941
2942			/*
2943			 * In the write case, the valid and clean bits are
2944			 * already changed correctly ( see bdwrite() ), so we
2945			 * only need to do this here in the read case.
2946			 */
2947			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
2948				vfs_page_set_valid(bp, foff, i, m);
2949			}
2950			vm_page_flag_clear(m, PG_ZERO);
2951
2952			/*
2953			 * when debugging new filesystems or buffer I/O methods, this
2954			 * is the most common error that pops up.  if you see this, you
2955			 * have not set the page busy flag correctly!!!
2956			 */
2957			if (m->busy == 0) {
2958				printf("biodone: page busy < 0, "
2959				    "pindex: %d, foff: 0x(%x,%x), "
2960				    "resid: %d, index: %d\n",
2961				    (int) m->pindex, (int)(foff >> 32),
2962						(int) foff & 0xffffffff, resid, i);
2963				if (!vn_isdisk(vp, NULL))
2964					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2965					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2966					    (int) bp->b_lblkno,
2967					    bp->b_flags, bp->b_npages);
2968				else
2969					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2970					    (int) bp->b_lblkno,
2971					    bp->b_flags, bp->b_npages);
2972				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2973				    m->valid, m->dirty, m->wire_count);
2974				panic("biodone: page busy < 0\n");
2975			}
2976			vm_page_io_finish(m);
2977			vm_object_pip_subtract(obj, 1);
2978			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2979			iosize -= resid;
2980		}
2981		if (obj)
2982			vm_object_pip_wakeupn(obj, 0);
2983	}
2984
2985	/*
2986	 * For asynchronous completions, release the buffer now. The brelse
2987	 * will do a wakeup there if necessary - so no need to do a wakeup
2988	 * here in the async case. The sync case always needs to do a wakeup.
2989	 */
2990
2991	if (bp->b_flags & B_ASYNC) {
2992		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
2993			brelse(bp);
2994		else
2995			bqrelse(bp);
2996	} else {
2997		wakeup(bp);
2998	}
2999	splx(s);
3000}
3001
3002/*
3003 * This routine is called in lieu of iodone in the case of
3004 * incomplete I/O.  This keeps the busy status for pages
3005 * consistent.
3006 */
3007void
3008vfs_unbusy_pages(struct buf * bp)
3009{
3010	int i;
3011
3012	GIANT_REQUIRED;
3013
3014	runningbufwakeup(bp);
3015	if (bp->b_flags & B_VMIO) {
3016		struct vnode *vp = bp->b_vp;
3017		vm_object_t obj;
3018
3019		VOP_GETVOBJECT(vp, &obj);
3020
3021		for (i = 0; i < bp->b_npages; i++) {
3022			vm_page_t m = bp->b_pages[i];
3023
3024			if (m == bogus_page) {
3025				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3026				if (!m) {
3027					panic("vfs_unbusy_pages: page missing\n");
3028				}
3029				bp->b_pages[i] = m;
3030				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
3031			}
3032			vm_object_pip_subtract(obj, 1);
3033			vm_page_flag_clear(m, PG_ZERO);
3034			vm_page_io_finish(m);
3035		}
3036		vm_object_pip_wakeupn(obj, 0);
3037	}
3038}
3039
3040/*
3041 * vfs_page_set_valid:
3042 *
3043 *	Set the valid bits in a page based on the supplied offset.   The
3044 *	range is restricted to the buffer's size.
3045 *
3046 *	This routine is typically called after a read completes.
3047 */
3048static void
3049vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
3050{
3051	vm_ooffset_t soff, eoff;
3052
3053	GIANT_REQUIRED;
3054	/*
3055	 * Start and end offsets in buffer.  eoff - soff may not cross a
3056	 * page boundary or cross the end of the buffer.  The end of the
3057	 * buffer, in this case, is our file EOF, not the allocation size
3058	 * of the buffer.
3059	 */
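	/*
	 * For example (4096 byte pages assumed only for illustration): a
	 * buffer with b_offset = 2048 and b_bcount = 4096 gets two calls.
	 * For page 0, soff = 2048 and eoff = 4096, validating the second
	 * half of that page; for page 1, soff = 4096 and eoff is clipped
	 * to 6144, validating the first half of that page.
	 */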
3060	soff = off;
3061	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3062	if (eoff > bp->b_offset + bp->b_bcount)
3063		eoff = bp->b_offset + bp->b_bcount;
3064
3065	/*
3066	 * Set valid range.  This is typically the entire buffer and thus the
3067	 * entire page.
3068	 */
3069	if (eoff > soff) {
3070		vm_page_set_validclean(
3071		    m,
3072		   (vm_offset_t) (soff & PAGE_MASK),
3073		   (vm_offset_t) (eoff - soff)
3074		);
3075	}
3076}
3077
3078/*
3079 * This routine is called before a device strategy routine.
3080 * It is used to tell the VM system that paging I/O is in
3081 * progress, and treat the pages associated with the buffer
3082 * almost as being PG_BUSY.  Also the object paging_in_progress
3083 * flag is handled to make sure that the object doesn't become
3084 * inconsistent.
3085 *
3086 * Since I/O has not been initiated yet, certain buffer flags
3087 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3088 * and should be ignored.
3089 */
3090void
3091vfs_busy_pages(struct buf * bp, int clear_modify)
3092{
3093	int i, bogus;
3094
3095	GIANT_REQUIRED;
3096
3097	if (bp->b_flags & B_VMIO) {
3098		struct vnode *vp = bp->b_vp;
3099		vm_object_t obj;
3100		vm_ooffset_t foff;
3101
3102		VOP_GETVOBJECT(vp, &obj);
3103		foff = bp->b_offset;
3104		KASSERT(bp->b_offset != NOOFFSET,
3105		    ("vfs_busy_pages: no buffer offset"));
3106		vfs_setdirty(bp);
3107
3108retry:
3109		for (i = 0; i < bp->b_npages; i++) {
3110			vm_page_t m = bp->b_pages[i];
3111			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
3112				goto retry;
3113		}
3114
3115		bogus = 0;
3116		for (i = 0; i < bp->b_npages; i++) {
3117			vm_page_t m = bp->b_pages[i];
3118
3119			vm_page_flag_clear(m, PG_ZERO);
3120			if ((bp->b_flags & B_CLUSTER) == 0) {
3121				vm_object_pip_add(obj, 1);
3122				vm_page_io_start(m);
3123			}
3124
3125			/*
3126			 * When readying a buffer for a read ( i.e
3127			 * clear_modify == 0 ), it is important to do
3128			 * bogus_page replacement for valid pages in
3129			 * partially instantiated buffers.  Partially
3130			 * instantiated buffers can, in turn, occur when
3131			 * reconstituting a buffer from its VM backing store
3132			 * base.  We only have to do this if B_CACHE is
3133			 * clear ( which causes the I/O to occur in the
3134			 * first place ).  The replacement prevents the read
3135			 * I/O from overwriting potentially dirty VM-backed
3136			 * pages.  XXX bogus page replacement is, uh, bogus.
3137			 * It may not work properly with small-block devices.
3138			 * We need to find a better way.
3139			 */
3140
3141			vm_page_protect(m, VM_PROT_NONE);
3142			if (clear_modify)
3143				vfs_page_set_valid(bp, foff, i, m);
3144			else if (m->valid == VM_PAGE_BITS_ALL &&
3145				(bp->b_flags & B_CACHE) == 0) {
3146				bp->b_pages[i] = bogus_page;
3147				bogus++;
3148			}
3149			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3150		}
3151		if (bogus)
3152			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
3153	}
3154}
3155
3156/*
3157 * Tell the VM system that the pages associated with this buffer
3158 * are clean.  This is used for delayed writes where the data is
3159 * going to go to disk eventually without additional VM intervention.
3160 *
3161 * Note that while we only really need to clean through to b_bcount, we
3162 * just go ahead and clean through to b_bufsize.
3163 */
3164static void
3165vfs_clean_pages(struct buf * bp)
3166{
3167	int i;
3168
3169	GIANT_REQUIRED;
3170
3171	if (bp->b_flags & B_VMIO) {
3172		vm_ooffset_t foff;
3173
3174		foff = bp->b_offset;
3175		KASSERT(bp->b_offset != NOOFFSET,
3176		    ("vfs_clean_pages: no buffer offset"));
3177		for (i = 0; i < bp->b_npages; i++) {
3178			vm_page_t m = bp->b_pages[i];
3179			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3180			vm_ooffset_t eoff = noff;
3181
3182			if (eoff > bp->b_offset + bp->b_bufsize)
3183				eoff = bp->b_offset + bp->b_bufsize;
3184			vfs_page_set_valid(bp, foff, i, m);
3185			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3186			foff = noff;
3187		}
3188	}
3189}
3190
3191/*
3192 *	vfs_bio_set_validclean:
3193 *
3194 *	Set the range within the buffer to valid and clean.  The range is
3195 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
3196 *	itself may be offset from the beginning of the first page.
3197 *
3198 */
3199
3200void
3201vfs_bio_set_validclean(struct buf *bp, int base, int size)
3202{
3203	if (bp->b_flags & B_VMIO) {
3204		int i;
3205		int n;
3206
3207		/*
3208		 * Fixup base to be relative to beginning of first page.
3209		 * Set initial n to be the maximum number of bytes in the
3210		 * first page that can be validated.
3211		 */
3212
3213		base += (bp->b_offset & PAGE_MASK);
3214		n = PAGE_SIZE - (base & PAGE_MASK);
3215
3216		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3217			vm_page_t m = bp->b_pages[i];
3218
3219			if (n > size)
3220				n = size;
3221
3222			vm_page_set_validclean(m, base & PAGE_MASK, n);
3223			base += n;
3224			size -= n;
3225			n = PAGE_SIZE;
3226		}
3227	}
3228}
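
/*
 * Example usage (hypothetical names): after copying "n" bytes of fresh data
 * into the buffer at byte offset "base" relative to b_offset, a caller could
 * mark just that range valid and clean:
 *
 *	bcopy(src, bp->b_data + base, n);
 *	vfs_bio_set_validclean(bp, base, n);
 */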
3229
3230/*
3231 *	vfs_bio_clrbuf:
3232 *
3233 *	clear a buffer.  This routine essentially fakes an I/O, so we need
3234 *	to clear BIO_ERROR and B_INVAL.
3235 *
3236 *	Note that while we only theoretically need to clear through b_bcount,
3237 *	we go ahead and clear through b_bufsize.
3238 */
3239
3240void
3241vfs_bio_clrbuf(struct buf *bp) {
3242	int i, mask = 0;
3243	caddr_t sa, ea;
3244
3245	GIANT_REQUIRED;
3246
3247	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
3248		bp->b_flags &= ~B_INVAL;
3249		bp->b_ioflags &= ~BIO_ERROR;
3250		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3251		    (bp->b_offset & PAGE_MASK) == 0) {
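			/*
			 * For example (assuming DEV_BSIZE of 512): a 2048
			 * byte buffer yields mask = (1 << 4) - 1 = 0xf,
			 * covering the valid bits of the first four 512
			 * byte chunks of the single page.
			 */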
3252			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3253			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
3254			    ((bp->b_pages[0]->valid & mask) != mask)) {
3255				bzero(bp->b_data, bp->b_bufsize);
3256			}
3257			bp->b_pages[0]->valid |= mask;
3258			bp->b_resid = 0;
3259			return;
3260		}
3261		ea = sa = bp->b_data;
3262		for(i=0;i<bp->b_npages;i++,sa=ea) {
3263			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3264			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3265			ea = (caddr_t)(vm_offset_t)ulmin(
3266			    (u_long)(vm_offset_t)ea,
3267			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3268			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3269			if ((bp->b_pages[i]->valid & mask) == mask)
3270				continue;
3271			if ((bp->b_pages[i]->valid & mask) == 0) {
3272				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
3273					bzero(sa, ea - sa);
3274				}
3275			} else {
3276				for (; sa < ea; sa += DEV_BSIZE, j++) {
3277					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
3278						(bp->b_pages[i]->valid & (1<<j)) == 0)
3279						bzero(sa, DEV_BSIZE);
3280				}
3281			}
3282			bp->b_pages[i]->valid |= mask;
3283			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
3284		}
3285		bp->b_resid = 0;
3286	} else {
3287		clrbuf(bp);
3288	}
3289}
3290
3291/*
3292 * vm_hold_load_pages and vm_hold_free_pages get pages into
3293 * a buffer's address space.  The pages are anonymous and are
3294 * not associated with a file object.
3295 */
3296static void
3297vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3298{
3299	vm_offset_t pg;
3300	vm_page_t p;
3301	int index;
3302
3303	GIANT_REQUIRED;
3304
3305	to = round_page(to);
3306	from = round_page(from);
3307	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3308
3309	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3310tryagain:
3311		/*
3312		 * note: must allocate system pages since blocking here
3313		 * could interfere with paging I/O, no matter which
3314		 * process we are.
3315		 */
3316		p = vm_page_alloc(kernel_object,
3317			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
3318		    VM_ALLOC_SYSTEM);
3319		if (!p) {
3320			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
3321			VM_WAIT;
3322			goto tryagain;
3323		}
3324		vm_page_wire(p);
3325		p->valid = VM_PAGE_BITS_ALL;
3326		vm_page_flag_clear(p, PG_ZERO);
3327		pmap_qenter(pg, &p, 1);
3328		bp->b_pages[index] = p;
3329		vm_page_wakeup(p);
3330	}
3331	bp->b_npages = index;
3332}
3333
3334/* Return pages associated with this buf to the vm system */
3335void
3336vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3337{
3338	vm_offset_t pg;
3339	vm_page_t p;
3340	int index, newnpages;
3341
3342	GIANT_REQUIRED;
3343
3344	from = round_page(from);
3345	to = round_page(to);
3346	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3347
3348	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3349		p = bp->b_pages[index];
3350		if (p && (index < bp->b_npages)) {
3351			if (p->busy) {
3352				printf("vm_hold_free_pages: blkno: %lld, lblkno: %lld\n",
3353					bp->b_blkno, bp->b_lblkno);
3354			}
3355			bp->b_pages[index] = NULL;
3356			pmap_qremove(pg, 1);
3357			vm_page_busy(p);
3358			vm_page_unwire(p, 0);
3359			vm_page_free(p);
3360		}
3361	}
3362	bp->b_npages = newnpages;
3363}
3364
3365
3366#include "opt_ddb.h"
3367#ifdef DDB
3368#include <ddb/ddb.h>
3369
3370/* DDB command to show buffer data */
3371DB_SHOW_COMMAND(buffer, db_show_buffer)
3372{
3373	/* get args */
3374	struct buf *bp = (struct buf *)addr;
3375
3376	if (!have_addr) {
3377		db_printf("usage: show buffer <addr>\n");
3378		return;
3379	}
3380
3381	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
3382	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
3383		  "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
3384		  "b_blkno = %lld, b_pblkno = %lld\n",
3385		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3386		  major(bp->b_dev), minor(bp->b_dev),
3387		  bp->b_data, bp->b_blkno, bp->b_pblkno);
3388	if (bp->b_npages) {
3389		int i;
3390		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3391		for (i = 0; i < bp->b_npages; i++) {
3392			vm_page_t m;
3393			m = bp->b_pages[i];
3394			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3395			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3396			if ((i + 1) < bp->b_npages)
3397				db_printf(",");
3398		}
3399		db_printf("\n");
3400	}
3401}
3402#endif /* DDB */
3403