vfs_bio.c revision 207617
/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See buf(9) for more info.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 207617 2010-05-04 15:55:41Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include "opt_compat.h"
#include "opt_directio.h"
#include "opt_swap.h"

static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");

struct	bio_ops bioops;		/* I/O operation notification */

struct	buf_ops buf_ops_bio = {
	.bop_name	=	"buf_ops_bio",
	.bop_write	=	bufwrite,
	.bop_strategy	=	bufstrategy,
	.bop_sync	=	bufsync,
	.bop_bdflush	=	bufbdflush,
};

/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */

static struct proc *bufdaemonproc;

static int inmem(struct vnode *vp, daddr_t blkno);
static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
		vm_page_t m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_setdirty_locked_object(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
static int buf_do_flush(struct vnode *vp);
static int flushbufqueues(struct vnode *, int, int);
static void buf_daemon(void);
static void bremfreel(struct buf *bp);
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
#endif

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
long runningbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer io");
static long bufspace;
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
#else
SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "Virtual memory used for buffers");
#endif
static long maxbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static long bufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static long maxbufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static long lobufspace;
SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
long hibufspace;
SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static long lorunningspace;
SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static long hirunningspace;
SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
int dirtybufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
int bdwriteskip;
SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
int altbufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
    0, "Number of fsync flushes to limit dirty buffers");
static int recursiveflushes;
SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
    0, "Number of flushes skipped due to being recursive");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
int dirtybufthresh;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
   "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
   "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
   "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");
static int flushbufqtarget = 100;
SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
    "Amount of work to do in flushbufqueues when helping bufdaemon");
static long notbufdflashes;
SYSCTL_LONG(_vfs, OID_AUTO, notbufdflashes, CTLFLAG_RD, &notbufdflashes, 0,
    "Number of dirty buffer flushes done by the bufdaemon helpers");

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * Request for the buf daemon to write more buffers than is indicated by
 * lodirtybuffers.  This may be necessary to push out excess dependencies or
 * defragment the address space where a simple count of the number of dirty
 * buffers is insufficient to characterize the demand for flushing them.
 */
static int bd_speedupreq;

/*
 * This lock synchronizes access to bd_request.
 */
static struct mtx bdlock;

/*
 * Bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
 * waitrunningbufspace().
 */
static struct mtx rbreqlock;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
 */
static struct mtx nblock;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
#define QUEUE_DIRTY_GIANT 3	/* B_DELWRI buffers that need giant */
#define QUEUE_EMPTYKVA	4	/* empty buffer headers w/KVA assignment */
#define QUEUE_EMPTY	5	/* empty buffer headers */
#define QUEUE_SENTINEL	1024	/* not a queue index, but mark for sentinel */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };

/* Lock for the bufqueues */
static struct mtx bqlock;

/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referred to from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
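
/*
 * The handshake on needsbuffer follows the usual msleep(9)/wakeup(9)
 * pattern.  A waiter (e.g. bwillwrite(), below) sets a flag and sleeps
 * on &needsbuffer under nblock:
 *
 *	mtx_lock(&nblock);
 *	needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 *	msleep(&needsbuffer, &nblock, PRIBIO + 4, "flswai", 0);
 *	mtx_unlock(&nblock);
 *
 * and a producer (e.g. numdirtywakeup(), below) clears the flag and
 * calls wakeup(&needsbuffer) under the same lock once the condition
 * is satisfied.
 */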

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int
sysctl_bufspace(SYSCTL_HANDLER_ARGS)
{
	long lvalue;
	int ivalue;

	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
		return (sysctl_handle_long(oidp, arg1, arg2, req));
	lvalue = *(long *)arg1;
	if (lvalue > INT_MAX)
		/* On overflow, still write out a long to trigger ENOMEM. */
		return (sysctl_handle_long(oidp, &lvalue, 0, req));
	ivalue = lvalue;
	return (sysctl_handle_int(oidp, &ivalue, 0, req));
}
#endif

#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */
/*
 *	numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */

static __inline void
numdirtywakeup(int level)
{

	if (numdirtybuffers <= level) {
		mtx_lock(&nblock);
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);
		}
		mtx_unlock(&nblock);
	}
}

/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{

	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	mtx_lock(&nblock);
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 * runningbufwakeup() - in-progress I/O accounting.  Subtract a completed
 * buffer's contribution from runningbufspace and wake up anyone sleeping
 * in waitrunningbufspace() once usage has dropped back to lorunningspace.
 */
void
runningbufwakeup(struct buf *bp)
{

	if (bp->b_runningbufspace) {
		atomic_subtract_long(&runningbufspace, bp->b_runningbufspace);
		bp->b_runningbufspace = 0;
		mtx_lock(&rbreqlock);
		if (runningbufreq && runningbufspace <= lorunningspace) {
			runningbufreq = 0;
			wakeup(&runningbufreq);
		}
		mtx_unlock(&rbreqlock);
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{

	atomic_add_int(&numfreebuffers, 1);
	mtx_lock(&nblock);
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 *	waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	Reads will adjust runningbufspace, but will not block based on it.
 *	The read load has a side effect of reducing the allowed write load.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
void
waitrunningbufspace(void)
{

	mtx_lock(&rbreqlock);
	while (runningbufspace > hirunningspace) {
		++runningbufreq;
		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
	}
	mtx_unlock(&rbreqlock);
}
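
/*
 * Note that runningbufspace accounting is symmetric: bufwrite() charges
 * b_bufsize to runningbufspace before calling bstrategy(), and I/O
 * completion returns it via runningbufwakeup(), which wakes any thread
 * sleeping here once usage has fallen back to lorunningspace.  The gap
 * between lorunningspace and hirunningspace provides hysteresis so that
 * waiters are released in batches rather than one completion at a time.
 */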

/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}
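
/*
 * For example, growing an 8K buffer whose second 4K page contains no
 * valid data will find vm_page_is_valid() false for that page and clear
 * B_CACHE, so getblk()/allocbuf() callers see the buffer as not fully
 * valid and read the missing data instead of using stale contents.
 */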

/* Wake up the buffer daemon if necessary */
static __inline
void
bd_wakeup(int dirtybuflevel)
{

	mtx_lock(&bdlock);
	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
		bd_request = 1;
		wakeup(&bd_request);
	}
	mtx_unlock(&bdlock);
}
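
/*
 * The consumer side of bd_request is buf_daemon() (later in this file):
 * it clears bd_request at the top of each flushing pass and sleeps on
 * &bd_request under bdlock when there is nothing left to do, so the
 * bd_request == 0 test above ensures a wakeup is only posted when the
 * daemon may actually be idle.
 */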

/*
 * bd_speedup - speedup the buffer cache flushing code
 */

void
bd_speedup(void)
{
	int needwake;

	mtx_lock(&bdlock);
	needwake = 0;
	if (bd_speedupreq == 0 || bd_request == 0)
		needwake = 1;
	bd_speedupreq = 1;
	bd_request = 1;
	if (needwake)
		wakeup(&bd_request);
	mtx_unlock(&bdlock);
}

/*
 * Calculate buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
{
	int tuned_nbuf;
	long maxbuf;

	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
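	/*
	 * A worked example, assuming BKVASIZE is 16384 (its common
	 * definition), so factor == 4 * 16384 / 1024 == 64: with 512MB
	 * of RAM, physmem_est == 524288 KB, the first term is capped at
	 * 65536 / 64 == 1024, the second term adds
	 * (524288 - 65536) * 2 / 320 == 2867, and nbuf becomes
	 * 50 + 1024 + 2867 == 3941 before the maxbcache clamp.
	 */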
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += (physmem_est - 65536) * 2 / (factor * 5);

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
		tuned_nbuf = 1;
	} else
		tuned_nbuf = 0;

	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
	maxbuf = (LONG_MAX / 3) / BKVASIZE;
	if (nbuf > maxbuf) {
		if (!tuned_nbuf)
			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
			    maxbuf);
		nbuf = maxbuf;
	}

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);
#ifdef NSWBUF_MIN
	if (nswbuf < NSWBUF_MIN)
		nswbuf = NSWBUF_MIN;
#endif
#ifdef DIRECTIO
	ffs_rawread_setup();
#endif

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	return(v);
}

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vflags = 0;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = (long)nbuf * BKVASIZE;
	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;
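
	/*
	 * Continuing the worked example from above (nbuf == 3941,
	 * BKVASIZE == 16K, MAXBSIZE == 64K): maxbufspace is ~61.6MB,
	 * the lmax() selects maxbufspace - 640KB as hibufspace (it
	 * exceeds 3/4 of maxbufspace), and lobufspace trails hibufspace
	 * by one MAXBSIZE unit.
	 */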

	lorunningspace = 512 * 1024;
	hirunningspace = 1024 * 1024;

/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	hidirtybuffers = nbuf / 4 + 20;
	dirtybufthresh = hidirtybuffers * 9 / 10;
	numdirtybuffers = 0;
/*
 * To support extreme low-memory systems, make sure hidirtybuffers cannot
 * eat up all available buffer space.  This occurs when our minimum cannot
 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
 * BKVASIZE'd (8K) buffers.
 */
	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;

/*
 * Try to keep the number of free buffers in the specified range,
 * and give special processes (e.g. like buf_daemon) access to an
 * emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf *bp)
{

	if (bp->b_kvasize) {
		atomic_add_int(&buffreekvacnt, 1);
		atomic_subtract_long(&bufspace, bp->b_kvasize);
		vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Mark the buffer for removal from the appropriate free list in brelse.
 */
void
bremfree(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfree: buffer %p not on a queue.", bp));
	BUF_ASSERT_HELD(bp);

	bp->b_flags |= B_REMFREE;
	/* Fixup numfreebuffers count.  */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);
}
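
/*
 * A typical delayed-removal sequence: a caller holding the buf lock
 * calls bremfree(bp), which only sets B_REMFREE, and the actual TAILQ
 * unlinking is deferred to the next point where bqlock is held anyway
 * (bremfreel() via brelse(), bqrelse() or bremfreef()), saving a
 * separate bqlock round trip here.
 */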

/*
 *	bremfreef:
 *
 *	Force an immediate removal from a free list.  Used only in nfs when
 *	it abuses the b_freelist pointer.
 */
void
bremfreef(struct buf *bp)
{
	mtx_lock(&bqlock);
	bremfreel(bp);
	mtx_unlock(&bqlock);
}

/*
 *	bremfreel:
 *
 *	Removes a buffer from the free list, must be called with the
 *	bqlock held.
 */
static void
bremfreel(struct buf *bp)
{
	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfreel: buffer %p not on a queue.", bp));
	BUF_ASSERT_HELD(bp);
	mtx_assert(&bqlock, MA_OWNED);

	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
	bp->b_qindex = QUEUE_NONE;
	/*
	 * If this was a delayed bremfree() we only need to remove the buffer
	 * from the queue and return; the stats are already done.
	 */
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
		return;
	}
	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, the buffer was free and we must decrement
	 * numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).  This is really just a special case of breadn().
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf **bpp)
{

	return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
}
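
/*
 * For reference, the usual caller idiom (as seen throughout the
 * filesystems; "lbn" and "size" stand for the caller's block number
 * and block size):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use bp->b_data...
 *	bqrelse(bp);
 */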

/*
 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 * the buffer is valid and we do not have to do anything.
 */
void
breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
    int cnt, struct ucred * cred)
{
	struct buf *rabp;
	int i;

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			rabp->b_iooffset = dbtob(rabp->b_blkno);
			bstrategy(rabp);
		} else {
			brelse(rabp);
		}
	}
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf **bpp)
{
	struct buf *bp;
	int rv = 0, readwait = 0;

	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
	*bpp = bp = getblk(vp, blkno, size, 0, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (!TD_IS_IDLETHREAD(curthread))
			curthread->td_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		++readwait;
	}

	breada(vp, rablkno, rabsize, cnt, cred);

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
int
bufwrite(struct buf *bp)
{
	int oldflags;
	struct vnode *vp;
	int vp_md;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

	BUF_ASSERT_HELD(bp);

	if (bp->b_pin_count > 0)
		bunpin_wait(bp);

	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
	    ("FFS background buffer should not get here %p", bp));

	vp = bp->b_vp;
	if (vp)
		vp_md = vp->v_vflag & VV_MD;
	else
		vp_md = 0;

	/* Mark the buffer clean */
	bundirty(bp);

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bufobj_wref(bp->b_bufobj);
	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes: charge the buffer's size to
	 * runningbufspace so waitrunningbufspace() can throttle async
	 * writers.
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_long(&runningbufspace, bp->b_runningbufspace);

	if (!TD_IS_IDLETHREAD(curthread))
		curthread->td_ru.ru_oublock++;
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	} else {
		/*
		 * Don't allow the async write to saturate the I/O
		 * system.  We will not deadlock here because
		 * we are blocking waiting for I/O that is already in-progress
		 * to complete. We do not block here if it is the update
		 * or syncer daemon trying to clean up as that can lead
		 * to deadlock.
		 */
		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
			waitrunningbufspace();
	}

	return (0);
}

void
bufbdflush(struct bufobj *bo, struct buf *bp)
{
	struct buf *nbp;

	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
		altbufferflushes++;
	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
		BO_LOCK(bo);
		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    BUF_LOCK(nbp,
				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
				continue;
			if (bp == nbp)
				panic("bdwrite: found ourselves");
			BO_UNLOCK(bo);
			/* Don't countdeps with the bo lock held. */
			if (buf_countdeps(nbp, 0)) {
				BO_LOCK(bo);
				BUF_UNLOCK(nbp);
				continue;
			}
			if (nbp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(nbp);
			} else {
				bremfree(nbp);
				bawrite(nbp);
			}
			dirtybufferflushes++;
			break;
		}
		if (nbp == NULL)
			BO_UNLOCK(bo);
	}
}

/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct bufobj *bo;

	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}

	/*
	 * If we have too many dirty buffers, don't create any more.
	 * If we are wildly over our limit, then force a complete
	 * cleanup. Otherwise, just keep the situation from getting
	 * out of control. Note that we have to avoid a recursive
	 * disaster and not try to clean up after our own cleanup!
	 */
	vp = bp->b_vp;
	bo = bp->b_bufobj;
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		BO_BDFLUSH(bo, bp);
		td->td_pflags &= ~TDP_INBDFLUSH;
	} else
		recursiveflushes++;

	bdirty(bp);
	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * Wakeup the buffer flushing daemon if we have a lot of dirty
	 * buffers (midpoint between our recovery point and our stall
	 * point).
	 */
	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
		reassignbuf(bp);
		atomic_add_int(&numdirtybuffers, 1);
		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		atomic_subtract_int(&numdirtybuffers, 1);
		numdirtywakeup(lodirtybuffers);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 *	bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */

void
bwillwrite(void)
{

	if (numdirtybuffers >= hidirtybuffers) {
		mtx_lock(&nblock);
		while (numdirtybuffers >= hidirtybuffers) {
			bd_wakeup(1);
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			msleep(&needsbuffer, &nblock,
			    (PRIBIO + 4), "flswai", 0);
		}
		mtx_unlock(&nblock);
	}
}

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{

	return(numdirtybuffers >= hidirtybuffers);
}

static __noinline int
buf_vm_page_count_severe(void)
{

	KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);

	return vm_page_count_severe();
}
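
/*
 * The fail point above exists so the severe-pressure paths can be
 * exercised on demand; with the fail(9) framework this would be driven
 * through the corresponding debug.fail_point sysctl (here,
 * debug.fail_point.buf_pressure).
 */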

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_flags & B_MANAGED) {
		bqrelse(bp);
		return;
	}

	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If the error is anything
		 * other than an I/O error (EIO), assume that retrying
		 * is futile.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			atomic_subtract_int(&numdirtybuffers, 1);
			numdirtywakeup(lodirtybuffers);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (buf_vm_page_count_severe()) {
		/*
		 * The locking of the BO_LOCK is not necessary since
		 * BKGRDINPROG cannot be set while we hold the buf
		 * lock, it can only be cleared if it is already
		 * pending.
		 */
		if (bp->b_vp) {
			if (!(bp->b_vflags & BV_BKGRDINPROG))
				bp->b_flags |= B_RELBUF;
		} else
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer. If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
		 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
		 !vn_isdisk(bp->b_vp, NULL) &&
		 (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;

		obj = bp->b_bufobj->bo_object;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		VM_OBJECT_LOCK(obj);
		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
			if (m == bogus_page) {
				poff = OFF_TO_IDX(bp->b_offset);
				had_bogus = 1;

				for (j = i; j < bp->b_npages; j++) {
					vm_page_t mtmp;
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = mtmp;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				m = bp->b_pages[i];
			}
			if ((bp->b_flags & B_NOCACHE) ||
			    (bp->b_ioflags & BIO_ERROR &&
			     bp->b_iocmd == BIO_READ)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_lock_queues();
				vm_page_set_invalid(m, poffset, presid);
				vm_page_unlock_queues();
				if (had_bogus)
					printf("avoided corruption bug in bogus_page/brelse code\n");
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		VM_OBJECT_UNLOCK(obj);
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);
		}

	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
		if (bp->b_bufsize != 0)
			allocbuf(bp, 0);
		if (bp->b_vp != NULL)
			brelvp(bp);
	}

	if (BUF_LOCKRECURSED(bp)) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		return;
	}

	/* enqueue */
	mtx_lock(&bqlock);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
		bremfreel(bp);
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/*
	 * If the buffer has junk contents signal it and eventually
	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
	 * doesn't find it.
	 */
	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
		bp->b_flags |= B_INVAL;
	if (bp->b_flags & B_INVAL) {
		if (bp->b_flags & B_DELWRI)
			bundirty(bp);
		if (bp->b_vp)
			brelvp(bp);
	}

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
		} else {
			bp->b_qindex = QUEUE_EMPTY;
		}
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	/* remaining buffers */
	} else {
		if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
		    (B_DELWRI|B_NEEDSGIANT))
			bp->b_qindex = QUEUE_DIRTY_GIANT;
		else if (bp->b_flags & B_DELWRI)
			bp->b_qindex = QUEUE_DIRTY;
		else
			bp->b_qindex = QUEUE_CLEAN;
		if (bp->b_flags & B_AGE)
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		else
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}
	mtx_unlock(&bqlock);

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if (!(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	/* unlock */
	BUF_UNLOCK(bp);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf *bp)
{
	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_LOCKRECURSED(bp)) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		return;
	}

	if (bp->b_flags & B_MANAGED) {
		if (bp->b_flags & B_REMFREE) {
			mtx_lock(&bqlock);
			bremfreel(bp);
			mtx_unlock(&bqlock);
		}
		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
		BUF_UNLOCK(bp);
		return;
	}

	mtx_lock(&bqlock);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
		bremfreel(bp);
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	/* buffers with stale but valid contents */
	if (bp->b_flags & B_DELWRI) {
		if (bp->b_flags & B_NEEDSGIANT)
			bp->b_qindex = QUEUE_DIRTY_GIANT;
		else
			bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	} else {
		/*
		 * The locking of the BO_LOCK for checking of the
		 * BV_BKGRDINPROG is not necessary since the
		 * BV_BKGRDINPROG cannot be set while we hold the buf
		 * lock, it can only be cleared if it is already
		 * pending.
		 */
		if (!buf_vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) {
			bp->b_qindex = QUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
			    b_freelist);
		} else {
			/*
			 * We are too low on memory, we have to try to free
			 * the buffer (most importantly: the wired pages
			 * making up its backing store) *now*.
			 */
			mtx_unlock(&bqlock);
			brelse(bp);
			return;
		}
	}
	mtx_unlock(&bqlock);

	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	/* unlock */
	BUF_UNLOCK(bp);
}

/* Give pages used by the bp back to the VM system (where possible) */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_lock(m);
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
		    m->wire_count == 0) {
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (buf_vm_page_count_severe()) {
				vm_page_try_to_cache(m);
			}
		}
		vm_page_unlock_queues();
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_npages = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block at a particular lbn is available for a clustered
 * write.
 */
static int
vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
{
	struct buf *bpa;
	int match;

	match = 0;

	/* If the buf isn't in core skip it */
	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
		return (0);

	/* If the buf is busy we don't want to wait for it */
	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		return (0);

	/* Only cluster with valid clusterable delayed write buffers */
	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
	    (B_DELWRI | B_CLUSTEROK))
		goto done;

	if (bpa->b_bufsize != size)
		goto done;

	/*
	 * Check to see if it is in the expected place on disk and that the
	 * block has been mapped.
	 */
	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
		match = 1;
done:
	BUF_UNLOCK(bpa);
	return (match);
}

/*
 *	vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
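/*
 * For example, with a 16K f_iosize and the usual 128K MAXPHYS, maxcl
 * is 8: the first loop scans forward from lblkno + 1 and the second
 * scans backward from lblkno - 1, each stopping at the first block
 * that fails vfs_bio_clcheck(), so a fully clusterable forward run
 * yields ncl == 8 and a single cluster_wbuild() call covering 128K.
 */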
int
vfs_bio_awrite(struct buf *bp)
{
	struct bufobj *bo;
	int i;
	int j;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int ncl;
	int nwritten;
	int size;
	int maxcl;

	bo = &vp->v_bufobj;
	/*
	 * Right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		BO_LOCK(bo);
		for (i = 1; i < maxcl; i++)
			if (vfs_bio_clcheck(vp, size, lblkno + i,
			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
				break;

		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
			if (vfs_bio_clcheck(vp, size, lblkno - j,
			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
				break;
		BO_UNLOCK(bo);
		--j;
		ncl = i + j;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			BUF_UNLOCK(bp);
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
			return nwritten;
		}
	}
	bremfree(bp);
	bp->b_flags |= B_ASYNC;
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) bwrite(bp);

	return nwritten;
}

/*
 *	getnewbuf:
 *
 *	Find and initialize a new buffer header, freeing up existing buffers
 *	in the bufqueues as necessary.  The new buffer is returned locked.
 *
 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
 *	buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 *	We block if:
 *		We have insufficient buffer headers
 *		We have insufficient buffer space
 *		buffer_map is too fragmented ( space reservation fails )
 *		If we have to flush dirty buffers ( but we try to avoid this )
 *
 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 *	Instead we ask the buf daemon to do it for us.  We attempt to
 *	avoid piecemeal wakeups of the pageout daemon.
 */

static struct buf *
getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
    int gbflags)
{
	struct thread *td;
	struct buf *bp;
	struct buf *nbp;
	int defrag = 0;
	int nqindex;
	static int flushingbufs;

	td = curthread;
	/*
	 * We can't afford to block since we might be holding a vnode lock,
	 * which may prevent system daemons from running.  We deal with
	 * low-memory situations by proactively returning memory and running
	 * async I/O rather than sync I/O.
	 */
	atomic_add_int(&getnewbufcalls, 1);
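	/*
	 * The subtract/add pair around the restart label means only
	 * actual restarts are counted: the first pass cancels out, and
	 * each subsequent jump to restart leaves getnewbufrestarts
	 * incremented by one.
	 */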
1760	atomic_subtract_int(&getnewbufrestarts, 1);
1761restart:
1762	atomic_add_int(&getnewbufrestarts, 1);
1763
1764	/*
1765	 * Setup for scan.  If we do not have enough free buffers,
1766	 * we setup a degenerate case that immediately fails.  Note
1767	 * that if we are specially marked process, we are allowed to
1768	 * dip into our reserves.
1769	 *
1770	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
1771	 *
1772	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
1773	 * However, there are a number of cases (defragging, reusing, ...)
1774	 * where we cannot backup.
1775	 */
1776	mtx_lock(&bqlock);
1777	nqindex = QUEUE_EMPTYKVA;
1778	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1779
1780	if (nbp == NULL) {
1781		/*
1782		 * If no EMPTYKVA buffers and we are either
1783		 * defragging or reusing, locate a CLEAN buffer
1784		 * to free or reuse.  If bufspace usage is low
1785		 * skip this step so we can allocate a new buffer.
1786		 */
1787		if (defrag || bufspace >= lobufspace) {
1788			nqindex = QUEUE_CLEAN;
1789			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1790		}
1791
1792		/*
1793		 * If we could not find or were not allowed to reuse a
1794		 * CLEAN buffer, check to see if it is ok to use an EMPTY
1795		 * buffer.  We can only use an EMPTY buffer if allocating
1796		 * its KVA would not otherwise run us out of buffer space.
1797		 */
1798		if (nbp == NULL && defrag == 0 &&
1799		    bufspace + maxsize < hibufspace) {
1800			nqindex = QUEUE_EMPTY;
1801			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1802		}
1803	}
1804
1805	/*
1806	 * Run scan, possibly freeing data and/or kva mappings on the fly
1807	 * depending.
1808	 */
1809
1810	while ((bp = nbp) != NULL) {
1811		int qindex = nqindex;
1812
1813		/*
1814		 * Calculate next bp ( we can only use it if we do not block
1815		 * or do other fancy things ).
1816		 */
1817		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1818			switch(qindex) {
1819			case QUEUE_EMPTY:
1820				nqindex = QUEUE_EMPTYKVA;
1821				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1822					break;
1823				/* FALLTHROUGH */
1824			case QUEUE_EMPTYKVA:
1825				nqindex = QUEUE_CLEAN;
1826				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1827					break;
1828				/* FALLTHROUGH */
1829			case QUEUE_CLEAN:
1830				/*
1831				 * nbp is NULL.
1832				 */
1833				break;
1834			}
1835		}
1836		/*
1837		 * If we are defragging then we need a buffer with
1838		 * b_kvasize != 0.  XXX this situation should no longer
1839		 * occur, if defrag is non-zero the buffer's b_kvasize
1840		 * should also be non-zero at this point.  XXX
1841		 */
1842		if (defrag && bp->b_kvasize == 0) {
1843			printf("Warning: defrag empty buffer %p\n", bp);
1844			continue;
1845		}
1846
1847		/*
1848		 * Start freeing the bp.  This is somewhat involved.  nbp
1849		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1850		 */
1851		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1852			continue;
1853		if (bp->b_vp) {
1854			BO_LOCK(bp->b_bufobj);
1855			if (bp->b_vflags & BV_BKGRDINPROG) {
1856				BO_UNLOCK(bp->b_bufobj);
1857				BUF_UNLOCK(bp);
1858				continue;
1859			}
1860			BO_UNLOCK(bp->b_bufobj);
1861		}
1862		CTR6(KTR_BUF,
1863		    "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
1864		    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
1865		    bp->b_kvasize, bp->b_bufsize, qindex);
1866
1867		/*
1868		 * Sanity Checks
1869		 */
1870		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1871
1872		/*
1873		 * Note: we no longer distinguish between VMIO and non-VMIO
1874		 * buffers.
1875		 */
1876
1877		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1878
1879		bremfreel(bp);
1880		mtx_unlock(&bqlock);
1881
1882		if (qindex == QUEUE_CLEAN) {
1883			if (bp->b_flags & B_VMIO) {
1884				bp->b_flags &= ~B_ASYNC;
1885				vfs_vmio_release(bp);
1886			}
1887			if (bp->b_vp)
1888				brelvp(bp);
1889		}
1890
1891		/*
1892		 * NOTE:  nbp is now entirely invalid.  We can only restart
1893		 * the scan from this point on.
1894		 *
1895		 * Get the rest of the buffer freed up.  b_kva* is still
1896		 * valid after this operation.
1897		 */
1898
1899		if (bp->b_rcred != NOCRED) {
1900			crfree(bp->b_rcred);
1901			bp->b_rcred = NOCRED;
1902		}
1903		if (bp->b_wcred != NOCRED) {
1904			crfree(bp->b_wcred);
1905			bp->b_wcred = NOCRED;
1906		}
1907		if (!LIST_EMPTY(&bp->b_dep))
1908			buf_deallocate(bp);
1909		if (bp->b_vflags & BV_BKGRDINPROG)
1910			panic("losing buffer 3");
1911		KASSERT(bp->b_vp == NULL,
1912		    ("bp: %p still has vnode %p.  qindex: %d",
1913		    bp, bp->b_vp, qindex));
1914		KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1915		   ("bp: %p still on a buffer list. xflags %X",
1916		    bp, bp->b_xflags));
1917
1918		if (bp->b_bufsize)
1919			allocbuf(bp, 0);
1920
1921		bp->b_flags = 0;
1922		bp->b_ioflags = 0;
1923		bp->b_xflags = 0;
1924		bp->b_vflags = 0;
1925		bp->b_vp = NULL;
1926		bp->b_blkno = bp->b_lblkno = 0;
1927		bp->b_offset = NOOFFSET;
1928		bp->b_iodone = 0;
1929		bp->b_error = 0;
1930		bp->b_resid = 0;
1931		bp->b_bcount = 0;
1932		bp->b_npages = 0;
1933		bp->b_dirtyoff = bp->b_dirtyend = 0;
1934		bp->b_bufobj = NULL;
1935		bp->b_pin_count = 0;
1936		bp->b_fsprivate1 = NULL;
1937		bp->b_fsprivate2 = NULL;
1938		bp->b_fsprivate3 = NULL;
1939
1940		LIST_INIT(&bp->b_dep);
1941
1942		/*
1943		 * If we are defragging then free the buffer.
1944		 */
1945		if (defrag) {
1946			bp->b_flags |= B_INVAL;
1947			bfreekva(bp);
1948			brelse(bp);
1949			defrag = 0;
1950			goto restart;
1951		}
1952
1953		/*
1954		 * Notify any waiters for the buffer lock about
1955		 * identity change by freeing the buffer.
1956		 */
1957		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
1958			bp->b_flags |= B_INVAL;
1959			bfreekva(bp);
1960			brelse(bp);
1961			goto restart;
1962		}
1963
1964		/*
1965		 * If we are overcommitted then recover the buffer and its
1966		 * KVM space.  This occurs in rare situations when multiple
1967		 * processes are blocked in getnewbuf() or allocbuf().
1968		 */
1969		if (bufspace >= hibufspace)
1970			flushingbufs = 1;
1971		if (flushingbufs && bp->b_kvasize != 0) {
1972			bp->b_flags |= B_INVAL;
1973			bfreekva(bp);
1974			brelse(bp);
1975			goto restart;
1976		}
1977		if (bufspace < lobufspace)
1978			flushingbufs = 0;
1979		break;
1980	}
1981
1982	/*
1983	 * If we exhausted our list, sleep as appropriate.  We may have to
1984	 * wakeup various daemons and write out some dirty buffers.
1985	 *
1986	 * Generally we are sleeping due to insufficient buffer space.
1987	 */
1988
1989	if (bp == NULL) {
1990		int flags, norunbuf;
1991		char *waitmsg;
1992		int fl;
1993
1994		if (defrag) {
1995			flags = VFS_BIO_NEED_BUFSPACE;
1996			waitmsg = "nbufkv";
1997		} else if (bufspace >= hibufspace) {
1998			waitmsg = "nbufbs";
1999			flags = VFS_BIO_NEED_BUFSPACE;
2000		} else {
2001			waitmsg = "newbuf";
2002			flags = VFS_BIO_NEED_ANY;
2003		}
2004		mtx_lock(&nblock);
2005		needsbuffer |= flags;
2006		mtx_unlock(&nblock);
2007		mtx_unlock(&bqlock);
2008
2009		bd_speedup();	/* heeeelp */
2010		if (gbflags & GB_NOWAIT_BD)
2011			return (NULL);
2012
2013		mtx_lock(&nblock);
2014		while (needsbuffer & flags) {
2015			if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) {
2016				mtx_unlock(&nblock);
2017				/*
2018				 * getblk() is called with the vnode
2019				 * locked, and a majority of the
2020				 * dirty buffers may well belong to
2021				 * that vnode.  Flushing those
2022				 * buffers makes progress that the
2023				 * buf daemon, which cannot lock
2024				 * the vnode, could not achieve on
2025				 * its own.
2026				 */
2027				norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2028				    (td->td_pflags & TDP_NORUNNINGBUF);
2029				/* play bufdaemon */
2030				td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2031				fl = buf_do_flush(vp);
2032				td->td_pflags &= norunbuf;
2033				mtx_lock(&nblock);
2034				if (fl != 0)
2035					continue;
2036				if ((needsbuffer & flags) == 0)
2037					break;
2038			}
2039			if (msleep(&needsbuffer, &nblock,
2040			    (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
2041				mtx_unlock(&nblock);
2042				return (NULL);
2043			}
2044		}
2045		mtx_unlock(&nblock);
2046	} else {
2047		/*
2048		 * We finally have a valid bp.  We aren't quite out of the
2049		 * woods; we still have to reserve kva space.  In order
2050		 * to keep fragmentation sane we only allocate kva in
2051		 * BKVASIZE chunks.
2052		 */
2053		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2054
2055		if (maxsize != bp->b_kvasize) {
2056			vm_offset_t addr = 0;
2057
2058			bfreekva(bp);
2059
2060			vm_map_lock(buffer_map);
2061			if (vm_map_findspace(buffer_map,
2062				vm_map_min(buffer_map), maxsize, &addr)) {
2063				/*
2064				 * Uh oh.  Buffer map is too fragmented.  We
2065				 * must defragment the map.
2066				 */
2067				atomic_add_int(&bufdefragcnt, 1);
2068				vm_map_unlock(buffer_map);
2069				defrag = 1;
2070				bp->b_flags |= B_INVAL;
2071				brelse(bp);
2072				goto restart;
2073			}
2074			if (addr) {
2075				vm_map_insert(buffer_map, NULL, 0,
2076					addr, addr + maxsize,
2077					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
2078
2079				bp->b_kvabase = (caddr_t) addr;
2080				bp->b_kvasize = maxsize;
2081				atomic_add_long(&bufspace, bp->b_kvasize);
2082				atomic_add_int(&bufreusecnt, 1);
2083			}
2084			vm_map_unlock(buffer_map);
2085		}
2086		bp->b_saveaddr = bp->b_kvabase;
2087		bp->b_data = bp->b_saveaddr;
2088	}
2089	return(bp);
2090}
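/*
 * A minimal caller sketch (hypothetical code; getblk() below is the
 * in-tree model).  Because getnewbuf() can return NULL after poking
 * the buf daemon, callers either retry the whole lookup or give up,
 * depending on whether an interruptible/timed sleep was requested:
 *
 *	bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
 *	if (bp == NULL) {
 *		if (slpflag || slptimeo)
 *			return (NULL);
 *		goto loop;
 *	}
 */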
2091
2092/*
2093 *	buf_daemon:
2094 *
2095 *	buffer flushing daemon.  Buffers are normally flushed by the
2096 *	update daemon but if it cannot keep up this process starts to
2097 *	take the load in an attempt to prevent getnewbuf() from blocking.
2098 */
2099
2100static struct kproc_desc buf_kp = {
2101	"bufdaemon",
2102	buf_daemon,
2103	&bufdaemonproc
2104};
2105SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2106
2107static int
2108buf_do_flush(struct vnode *vp)
2109{
2110	int flushed;
2111
2112	flushed = flushbufqueues(vp, QUEUE_DIRTY, 0);
2113	/* The list empty check here is slightly racy */
2114	if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
2115		mtx_lock(&Giant);
2116		flushed += flushbufqueues(vp, QUEUE_DIRTY_GIANT, 0);
2117		mtx_unlock(&Giant);
2118	}
2119	if (flushed == 0) {
2120		/*
2121		 * Could not find any buffers without rollback
2122		 * dependencies, so just write the first one
2123		 * in the hopes of eventually making progress.
2124		 */
2125		flushbufqueues(vp, QUEUE_DIRTY, 1);
2126		if (!TAILQ_EMPTY(
2127			    &bufqueues[QUEUE_DIRTY_GIANT])) {
2128			mtx_lock(&Giant);
2129			flushbufqueues(vp, QUEUE_DIRTY_GIANT, 1);
2130			mtx_unlock(&Giant);
2131		}
2132	}
2133	return (flushed);
2134}
2135
2136static void
2137buf_daemon()
2138{
2139	int lodirtysave;
2140
2141	/*
2142	 * This process needs to be suspended prior to shutdown sync.
2143	 */
2144	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2145	    SHUTDOWN_PRI_LAST);
2146
2147	/*
2148	 * This process is allowed to take the buffer cache to the limit
2149	 */
2150	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2151	mtx_lock(&bdlock);
2152	for (;;) {
2153		bd_request = 0;
2154		mtx_unlock(&bdlock);
2155
2156		kproc_suspend_check(bufdaemonproc);
2157		lodirtysave = lodirtybuffers;
2158		if (bd_speedupreq) {
2159			lodirtybuffers = numdirtybuffers / 2;
2160			bd_speedupreq = 0;
2161		}
2162		/*
2163		 * Do the flush.  Limit the amount of in-transit I/O we
2164		 * allow to build up, otherwise we would completely saturate
2165		 * the I/O system.  Wakeup any waiting processes before we
2166		 * normally would so they can run in parallel with our drain.
2167		 */
2168		while (numdirtybuffers > lodirtybuffers) {
2169			if (buf_do_flush(NULL) == 0)
2170				break;
2171			uio_yield();
2172		}
2173		lodirtybuffers = lodirtysave;
2174
2175		/*
2176		 * Only clear bd_request if we have reached our low water
2177		 * mark.  The buf_daemon normally waits 1 second and
2178		 * then incrementally flushes any dirty buffers that have
2179		 * built up, within reason.
2180		 *
2181		 * If we were unable to hit our low water mark and couldn't
2182		 * find any flushable buffers, we sleep half a second.
2183		 * Otherwise we loop immediately.
2184		 */
2185		mtx_lock(&bdlock);
2186		if (numdirtybuffers <= lodirtybuffers) {
2187			/*
2188			 * We reached our low water mark, reset the
2189			 * request and sleep until we are needed again.
2190			 * The sleep is just so the suspend code works.
2191			 */
2192			bd_request = 0;
2193			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2194		} else {
2195			/*
2196			 * We couldn't find any flushable dirty buffers but
2197			 * still have too many dirty buffers, so we
2198			 * have to sleep and try again.  (rare)
2199			 */
2200			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2201		}
2202	}
2203}
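/*
 * Watermark example (assumed numbers): with lodirtybuffers = 200 and
 * numdirtybuffers = 150, the flush loop above would normally not run
 * at all.  If bd_speedup() was requested, lodirtybuffers temporarily
 * becomes numdirtybuffers / 2 = 75, so the daemon still flushes about
 * 75 buffers before lodirtysave restores the normal low-water mark.
 * This is what guarantees forward progress for the "heeeelp" callers
 * in getnewbuf() even when the dirty count is already below the mark.
 */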
2204
2205/*
2206 *	flushbufqueues:
2207 *
2208 *	Try to flush a buffer in the dirty queue.  We must be careful to
2209 *	free up B_INVAL buffers instead of writing them, which NFS is
2210 *	particularly sensitive to.
2211 */
2212static int flushwithdeps = 0;
2213SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2214    0, "Number of buffers flushed with dependencies that require rollbacks");
2215
2216static int
2217flushbufqueues(struct vnode *lvp, int queue, int flushdeps)
2218{
2219	struct buf *sentinel;
2220	struct vnode *vp;
2221	struct mount *mp;
2222	struct buf *bp;
2223	int hasdeps;
2224	int flushed;
2225	int target;
2226
2227	if (lvp == NULL) {
2228		target = numdirtybuffers - lodirtybuffers;
2229		if (flushdeps && target > 2)
2230			target /= 2;
2231	} else
2232		target = flushbufqtarget;
2233	flushed = 0;
2234	bp = NULL;
2235	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2236	sentinel->b_qindex = QUEUE_SENTINEL;
2237	mtx_lock(&bqlock);
2238	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2239	while (flushed != target) {
2240		bp = TAILQ_NEXT(sentinel, b_freelist);
2241		if (bp != NULL) {
2242			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2243			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2244			    b_freelist);
2245		} else
2246			break;
2247		/*
2248		 * Skip sentinels inserted by other invocations of
2249		 * flushbufqueues(), taking care not to reorder them.
2250		 */
2251		if (bp->b_qindex == QUEUE_SENTINEL)
2252			continue;
2253		/*
2254		 * Only flush the buffers that belong to the
2255		 * vnode locked by the curthread.
2256		 */
2257		if (lvp != NULL && bp->b_vp != lvp)
2258			continue;
2259		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2260			continue;
2261		if (bp->b_pin_count > 0) {
2262			BUF_UNLOCK(bp);
2263			continue;
2264		}
2265		BO_LOCK(bp->b_bufobj);
2266		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2267		    (bp->b_flags & B_DELWRI) == 0) {
2268			BO_UNLOCK(bp->b_bufobj);
2269			BUF_UNLOCK(bp);
2270			continue;
2271		}
2272		BO_UNLOCK(bp->b_bufobj);
2273		if (bp->b_flags & B_INVAL) {
2274			bremfreel(bp);
2275			mtx_unlock(&bqlock);
2276			brelse(bp);
2277			flushed++;
2278			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2279			mtx_lock(&bqlock);
2280			continue;
2281		}
2282
2283		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2284			if (flushdeps == 0) {
2285				BUF_UNLOCK(bp);
2286				continue;
2287			}
2288			hasdeps = 1;
2289		} else
2290			hasdeps = 0;
2291		/*
2292		 * We must hold the lock on a vnode before writing
2293		 * one of its buffers. Otherwise we may confuse, or
2294		 * in the case of a snapshot vnode, deadlock the
2295		 * system.
2296		 *
2297		 * The lock order here is the reverse of the normal
2298		 * order of vnode lock followed by buf lock.  This is
2299		 * ok because the LK_NOWAIT prevents deadlock.
2300		 */
2301		vp = bp->b_vp;
2302		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2303			BUF_UNLOCK(bp);
2304			continue;
2305		}
2306		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) {
2307			mtx_unlock(&bqlock);
2308			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2309			    bp, bp->b_vp, bp->b_flags);
2310			if (curproc == bufdaemonproc)
2311				vfs_bio_awrite(bp);
2312			else {
2313				bremfree(bp);
2314				bwrite(bp);
2315				notbufdflashes++;
2316			}
2317			vn_finished_write(mp);
2318			VOP_UNLOCK(vp, 0);
2319			flushwithdeps += hasdeps;
2320			flushed++;
2321
2322			/*
2323			 * Sleeping on runningbufspace while holding
2324			 * vnode lock leads to deadlock.
2325			 */
2326			if (curproc == bufdaemonproc)
2327				waitrunningbufspace();
2328			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2329			mtx_lock(&bqlock);
2330			continue;
2331		}
2332		vn_finished_write(mp);
2333		BUF_UNLOCK(bp);
2334	}
2335	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2336	mtx_unlock(&bqlock);
2337	free(sentinel, M_TEMP);
2338	return (flushed);
2339}
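/*
 * The sentinel trick above in outline (a sketch of the pattern, not
 * extra code): a dummy buf with b_qindex == QUEUE_SENTINEL is threaded
 * onto the queue so the scan position survives dropping bqlock:
 *
 *	insert sentinel at the head of the queue;
 *	for (;;) {
 *		bp = TAILQ_NEXT(sentinel, b_freelist);
 *		move sentinel to just after bp;
 *		... bqlock may be dropped here to flush bp; nobody
 *		    else removes the sentinel, so it marks where the
 *		    scan resumes ...
 *	}
 *
 * Several flushers can run concurrently; each simply skips the other
 * invocations' sentinels without reordering them.
 */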
2340
2341/*
2342 * Check to see if a block is currently memory resident.
2343 */
2344struct buf *
2345incore(struct bufobj *bo, daddr_t blkno)
2346{
2347	struct buf *bp;
2348
2349	BO_LOCK(bo);
2350	bp = gbincore(bo, blkno);
2351	BO_UNLOCK(bo);
2352	return (bp);
2353}
2354
2355/*
2356 * Returns true if no I/O is needed to access the
2357 * associated VM object.  This is like incore except
2358 * it also hunts around in the VM system for the data.
2359 */
2360
2361static int
2362inmem(struct vnode * vp, daddr_t blkno)
2363{
2364	vm_object_t obj;
2365	vm_offset_t toff, tinc, size;
2366	vm_page_t m;
2367	vm_ooffset_t off;
2368
2369	ASSERT_VOP_LOCKED(vp, "inmem");
2370
2371	if (incore(&vp->v_bufobj, blkno))
2372		return 1;
2373	if (vp->v_mount == NULL)
2374		return 0;
2375	obj = vp->v_object;
2376	if (obj == NULL)
2377		return (0);
2378
2379	size = PAGE_SIZE;
2380	if (size > vp->v_mount->mnt_stat.f_iosize)
2381		size = vp->v_mount->mnt_stat.f_iosize;
2382	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2383
2384	VM_OBJECT_LOCK(obj);
2385	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2386		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2387		if (!m)
2388			goto notinmem;
2389		tinc = size;
2390		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2391			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2392		if (vm_page_is_valid(m,
2393		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2394			goto notinmem;
2395	}
2396	VM_OBJECT_UNLOCK(obj);
2397	return 1;
2398
2399notinmem:
2400	VM_OBJECT_UNLOCK(obj);
2401	return (0);
2402}
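/*
 * Mapping example (assumed values): with f_iosize = 8192 and
 * PAGE_SIZE = 4096, block 10 begins at byte offset 81920, so the loop
 * above visits the two pages at pindex 20 and 21; every DEV_BSIZE
 * chunk of both must be valid for inmem() to return 1.
 */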
2403
2404/*
2405 *	vfs_setdirty:
2406 *
2407 *	Sets the dirty range for a buffer based on the status of the dirty
2408 *	bits in the pages comprising the buffer.
2409 *
2410 *	The range is limited to the size of the buffer.
2411 *
2412 *	This routine is primarily used by NFS, but is generalized for the
2413 *	B_VMIO case.
2414 */
2415static void
2416vfs_setdirty(struct buf *bp)
2417{
2418
2419	/*
2420	 * Degenerate case - empty buffer
2421	 */
2422	if (bp->b_bufsize == 0)
2423		return;
2424
2425	if ((bp->b_flags & B_VMIO) == 0)
2426		return;
2427
2428	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2429	vfs_setdirty_locked_object(bp);
2430	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2431}
2432
2433static void
2434vfs_setdirty_locked_object(struct buf *bp)
2435{
2436	vm_object_t object;
2437	int i;
2438
2439	object = bp->b_bufobj->bo_object;
2440	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2441
2442	/*
2443	 * We qualify the scan for modified pages on whether the
2444	 * object has been flushed yet.
2445	 */
2446	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
2447		vm_offset_t boffset;
2448		vm_offset_t eoffset;
2449
2450		vm_page_lock_queues();
2451		/*
2452		 * test the pages to see if they have been modified directly
2453		 * by users through the VM system.
2454		 */
2455		for (i = 0; i < bp->b_npages; i++)
2456			vm_page_test_dirty(bp->b_pages[i]);
2457
2458		/*
2459		 * Calculate the encompassing dirty range, boffset and eoffset,
2460		 * (eoffset - boffset) bytes.
2461		 */
2462
2463		for (i = 0; i < bp->b_npages; i++) {
2464			if (bp->b_pages[i]->dirty)
2465				break;
2466		}
2467		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2468
2469		for (i = bp->b_npages - 1; i >= 0; --i) {
2470			if (bp->b_pages[i]->dirty) {
2471				break;
2472			}
2473		}
2474		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2475
2476		vm_page_unlock_queues();
2477		/*
2478		 * Fit it to the buffer.
2479		 */
2480
2481		if (eoffset > bp->b_bcount)
2482			eoffset = bp->b_bcount;
2483
2484		/*
2485		 * If we have a good dirty range, merge with the existing
2486		 * dirty range.
2487		 */
2488
2489		if (boffset < eoffset) {
2490			if (bp->b_dirtyoff > boffset)
2491				bp->b_dirtyoff = boffset;
2492			if (bp->b_dirtyend < eoffset)
2493				bp->b_dirtyend = eoffset;
2494		}
2495	}
2496}
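/*
 * Worked example (page-aligned b_offset assumed): for a four-page
 * buffer in which only page 2 tests dirty, the forward loop stops at
 * i = 2, so boffset = 2 << PAGE_SHIFT = 8192; the backward loop also
 * stops at i = 2, so eoffset = 3 << PAGE_SHIFT = 12288, clamped to
 * b_bcount.  The existing b_dirtyoff/b_dirtyend range is then widened
 * to cover [8192, 12288).
 */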
2497
2498/*
2499 *	getblk:
2500 *
2501 *	Get a block given a specified block and offset into a file/device.
2502 *	The buffer's B_DONE bit will be cleared on return, making it almost
2503 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2504 *	return.  The caller should clear B_INVAL prior to initiating a
2505 *	READ.
2506 *
2507 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2508 *	an existing buffer.
2509 *
2510 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2511 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2512 *	and then cleared based on the backing VM.  If the previous buffer is
2513 *	non-0-sized but invalid, B_CACHE will be cleared.
2514 *
2515 *	If getblk() must create a new buffer, the new buffer is returned with
2516 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2517 *	case it is returned with B_INVAL clear and B_CACHE set based on the
2518 *	backing VM.
2519 *
2520 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2521 *	B_CACHE bit is clear.
2522 *
2523 *	What this means, basically, is that the caller should use B_CACHE to
2524 *	determine whether the buffer is fully valid or not and should clear
2525 *	B_INVAL prior to issuing a read.  If the caller intends to validate
2526 *	the buffer by loading its data area with something, the caller needs
2527 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2528 *	the caller should set B_CACHE ( as an optimization ), else the caller
2529 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2530 *	a write attempt or if it was a successful read.  If the caller
2531 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2532 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2533 */
2534struct buf *
2535getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
2536    int flags)
2537{
2538	struct buf *bp;
2539	struct bufobj *bo;
2540	int error;
2541
2542	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
2543	ASSERT_VOP_LOCKED(vp, "getblk");
2544	if (size > MAXBSIZE)
2545		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2546
2547	bo = &vp->v_bufobj;
2548loop:
2549	/*
2550	 * Block if we are low on buffers.   Certain processes are allowed
2551	 * to completely exhaust the buffer cache.
2552	 *
2553	 * If this check ever becomes a bottleneck it may be better to
2554	 * move it into the else, when gbincore() fails.  At the moment
2555	 * it isn't a problem.
2556	 *
2557	 * XXX remove if 0 sections (clean this up after it's proven)
2558	 */
2559	if (numfreebuffers == 0) {
2560		if (TD_IS_IDLETHREAD(curthread))
2561			return NULL;
2562		mtx_lock(&nblock);
2563		needsbuffer |= VFS_BIO_NEED_ANY;
2564		mtx_unlock(&nblock);
2565	}
2566
2567	BO_LOCK(bo);
2568	bp = gbincore(bo, blkno);
2569	if (bp != NULL) {
2570		int lockflags;
2571		/*
2572		 * Buffer is in-core.  If the buffer is not busy, it must
2573		 * be on a queue.
2574		 */
2575		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
2576
2577		if (flags & GB_LOCK_NOWAIT)
2578			lockflags |= LK_NOWAIT;
2579
2580		error = BUF_TIMELOCK(bp, lockflags,
2581		    BO_MTX(bo), "getblk", slpflag, slptimeo);
2582
2583		/*
2584		 * If we slept and got the lock we have to restart in case
2585		 * the buffer changed identities.
2586		 */
2587		if (error == ENOLCK)
2588			goto loop;
2589		/* We timed out or were interrupted. */
2590		else if (error)
2591			return (NULL);
2592
2593		/*
2594		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2595		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2596		 * and for a VMIO buffer B_CACHE is adjusted according to the
2597		 * backing VM cache.
2598		 */
2599		if (bp->b_flags & B_INVAL)
2600			bp->b_flags &= ~B_CACHE;
2601		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2602			bp->b_flags |= B_CACHE;
2603		bremfree(bp);
2604
2605		/*
2606		 * Check for size inconsistencies for the non-VMIO case.
2607		 */
2608
2609		if (bp->b_bcount != size) {
2610			if ((bp->b_flags & B_VMIO) == 0 ||
2611			    (size > bp->b_kvasize)) {
2612				if (bp->b_flags & B_DELWRI) {
2613					/*
2614					 * If the buffer is pinned and the caller
2615					 * does not want to sleep waiting for it
2616					 * to be unpinned, bail out.
2617					 */
2618					if (bp->b_pin_count > 0) {
2619						if (flags & GB_LOCK_NOWAIT) {
2620							bqrelse(bp);
2621							return (NULL);
2622						} else {
2623							bunpin_wait(bp);
2624						}
2625					}
2626					bp->b_flags |= B_NOCACHE;
2627					bwrite(bp);
2628				} else {
2629					if (LIST_EMPTY(&bp->b_dep)) {
2630						bp->b_flags |= B_RELBUF;
2631						brelse(bp);
2632					} else {
2633						bp->b_flags |= B_NOCACHE;
2634						bwrite(bp);
2635					}
2636				}
2637				goto loop;
2638			}
2639		}
2640
2641		/*
2642		 * If the size is inconsistent in the VMIO case, we can resize
2643		 * the buffer.  This might lead to B_CACHE getting set or
2644		 * cleared.  If the size has not changed, B_CACHE remains
2645		 * unchanged from its previous state.
2646		 */
2647
2648		if (bp->b_bcount != size)
2649			allocbuf(bp, size);
2650
2651		KASSERT(bp->b_offset != NOOFFSET,
2652		    ("getblk: no buffer offset"));
2653
2654		/*
2655		 * A buffer with B_DELWRI set and B_CACHE clear must
2656		 * be committed before we can return the buffer in
2657		 * order to prevent the caller from issuing a read
2658		 * ( due to B_CACHE not being set ) and overwriting
2659		 * it.
2660		 *
2661		 * Most callers, including NFS and FFS, need this to
2662		 * operate properly either because they assume they
2663		 * can issue a read if B_CACHE is not set, or because
2664		 * ( for example ) an uncached B_DELWRI might loop due
2665		 * to softupdates re-dirtying the buffer.  In the latter
2666		 * case, B_CACHE is set after the first write completes,
2667		 * preventing further loops.
2668		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2669		 * above while extending the buffer, we cannot allow the
2670		 * buffer to remain with B_CACHE set after the write
2671		 * completes or it will represent a corrupt state.  To
2672		 * deal with this we set B_NOCACHE to scrap the buffer
2673		 * after the write.
2674		 *
2675		 * We might be able to do something fancy, like setting
2676		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2677		 * so the below call doesn't set B_CACHE, but that gets real
2678		 * confusing.  This is much easier.
2679		 */
2680
2681		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2682			bp->b_flags |= B_NOCACHE;
2683			bwrite(bp);
2684			goto loop;
2685		}
2686		bp->b_flags &= ~B_DONE;
2687	} else {
2688		int bsize, maxsize, vmio;
2689		off_t offset;
2690
2691		/*
2692		 * Buffer is not in-core, create new buffer.  The buffer
2693		 * returned by getnewbuf() is locked.  Note that the returned
2694		 * buffer is also considered valid (not marked B_INVAL).
2695		 */
2696		BO_UNLOCK(bo);
2697		/*
2698		 * If the user does not want us to create the buffer, bail out
2699		 * here.
2700		 */
2701		if (flags & GB_NOCREAT)
2702			return NULL;
2703		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
2704		offset = blkno * bsize;
2705		vmio = vp->v_object != NULL;
2706		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2707		maxsize = imax(maxsize, bsize);
2708
2709		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
2710		if (bp == NULL) {
2711			if (slpflag || slptimeo)
2712				return NULL;
2713			goto loop;
2714		}
2715
2716		/*
2717		 * This code is used to make sure that a buffer is not
2718		 * created while the getnewbuf routine is blocked.
2719		 * This can be a problem whether the vnode is locked or not.
2720		 * If the buffer is created out from under us, we have to
2721		 * throw away the one we just created.
2722		 *
2723		 * Note: this must occur before we associate the buffer
2724		 * with the vp especially considering limitations in
2725		 * the splay tree implementation when dealing with duplicate
2726		 * lblkno's.
2727		 */
2728		BO_LOCK(bo);
2729		if (gbincore(bo, blkno)) {
2730			BO_UNLOCK(bo);
2731			bp->b_flags |= B_INVAL;
2732			brelse(bp);
2733			goto loop;
2734		}
2735
2736		/*
2737		 * Insert the buffer into the hash, so that it can
2738		 * be found by incore.
2739		 */
2740		bp->b_blkno = bp->b_lblkno = blkno;
2741		bp->b_offset = offset;
2742		bgetvp(vp, bp);
2743		BO_UNLOCK(bo);
2744
2745		/*
2746		 * Set the B_VMIO bit and allocbuf() the buffer bigger.  Since the
2747		 * buffer size starts out as 0, B_CACHE will be set by
2748		 * allocbuf() for the VMIO case prior to it testing the
2749		 * backing store for validity.
2750		 */
2751
2752		if (vmio) {
2753			bp->b_flags |= B_VMIO;
2754#if defined(VFS_BIO_DEBUG)
2755			if (vn_canvmio(vp) != TRUE)
2756				printf("getblk: VMIO on vnode type %d\n",
2757					vp->v_type);
2758#endif
2759			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
2760			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
2761			    bp, vp->v_object, bp->b_bufobj->bo_object));
2762		} else {
2763			bp->b_flags &= ~B_VMIO;
2764			KASSERT(bp->b_bufobj->bo_object == NULL,
2765			    ("ARGH! has b_bufobj->bo_object %p %p\n",
2766			    bp, bp->b_bufobj->bo_object));
2767		}
2768
2769		allocbuf(bp, size);
2770		bp->b_flags &= ~B_DONE;
2771	}
2772	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
2773	BUF_ASSERT_HELD(bp);
2774	KASSERT(bp->b_bufobj == bo,
2775	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2776	return (bp);
2777}
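/*
 * A minimal read-path sketch built on getblk() (hypothetical caller,
 * error handling trimmed; bread() and friends follow this shape).
 * B_CACHE is the "fully valid" test described in the big comment
 * above getblk():
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */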
2778
2779/*
2780 * Get an empty, disassociated buffer of given size.  The buffer is initially
2781 * set to B_INVAL.
2782 */
2783struct buf *
2784geteblk(int size, int flags)
2785{
2786	struct buf *bp;
2787	int maxsize;
2788
2789	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2790	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
2791		if ((flags & GB_NOWAIT_BD) &&
2792		    (curthread->td_pflags & TDP_BUFNEED) != 0)
2793			return (NULL);
2794	}
2795	allocbuf(bp, size);
2796	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2797	BUF_ASSERT_HELD(bp);
2798	return (bp);
2799}
2800
2801
2802/*
2803 * This code constructs the buffer memory from either anonymous system
2804 * memory (in the case of non-VMIO operations) or from an associated
2805 * VM object (in the case of VMIO operations).  This code is able to
2806 * resize a buffer up or down.
2807 *
2808 * Note that this code is tricky, and has many complications to resolve
2809 * deadlock or inconsistent data situations.  Tread lightly!!!
2810 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2811 * the caller.  Calling this code willy nilly can result in the loss of data.
2812 *
2813 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2814 * B_CACHE for the non-VMIO case.
2815 */
2816
2817int
2818allocbuf(struct buf *bp, int size)
2819{
2820	int newbsize, mbsize;
2821	int i;
2822
2823	BUF_ASSERT_HELD(bp);
2824
2825	if (bp->b_kvasize < size)
2826		panic("allocbuf: buffer too small");
2827
2828	if ((bp->b_flags & B_VMIO) == 0) {
2829		caddr_t origbuf;
2830		int origbufsize;
2831		/*
2832		 * Just get anonymous memory from the kernel.  Don't
2833		 * mess with B_CACHE.
2834		 */
2835		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2836		if (bp->b_flags & B_MALLOC)
2837			newbsize = mbsize;
2838		else
2839			newbsize = round_page(size);
2840
2841		if (newbsize < bp->b_bufsize) {
2842			/*
2843			 * malloced buffers are not shrunk
2844			 */
2845			if (bp->b_flags & B_MALLOC) {
2846				if (newbsize) {
2847					bp->b_bcount = size;
2848				} else {
2849					free(bp->b_data, M_BIOBUF);
2850					if (bp->b_bufsize) {
2851						atomic_subtract_long(
2852						    &bufmallocspace,
2853						    bp->b_bufsize);
2854						bufspacewakeup();
2855						bp->b_bufsize = 0;
2856					}
2857					bp->b_saveaddr = bp->b_kvabase;
2858					bp->b_data = bp->b_saveaddr;
2859					bp->b_bcount = 0;
2860					bp->b_flags &= ~B_MALLOC;
2861				}
2862				return 1;
2863			}
2864			vm_hold_free_pages(
2865			    bp,
2866			    (vm_offset_t) bp->b_data + newbsize,
2867			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2868		} else if (newbsize > bp->b_bufsize) {
2869			/*
2870			 * We only use malloced memory on the first allocation,
2871			 * and revert to page-allocated memory when the buffer
2872			 * grows.
2873			 */
2874			/*
2875			 * There is a potential smp race here that could lead
2876			 * to bufmallocspace slightly passing the max.  It
2877			 * is probably extremely rare and not worth worrying
2878			 * over.
2879			 */
2880			if ( (bufmallocspace < maxbufmallocspace) &&
2881				(bp->b_bufsize == 0) &&
2882				(mbsize <= PAGE_SIZE/2)) {
2883
2884				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2885				bp->b_bufsize = mbsize;
2886				bp->b_bcount = size;
2887				bp->b_flags |= B_MALLOC;
2888				atomic_add_long(&bufmallocspace, mbsize);
2889				return 1;
2890			}
2891			origbuf = NULL;
2892			origbufsize = 0;
2893			/*
2894			 * If the buffer is growing on its other-than-first allocation,
2895			 * then we revert to the page-allocation scheme.
2896			 */
2897			if (bp->b_flags & B_MALLOC) {
2898				origbuf = bp->b_data;
2899				origbufsize = bp->b_bufsize;
2900				bp->b_data = bp->b_kvabase;
2901				if (bp->b_bufsize) {
2902					atomic_subtract_long(&bufmallocspace,
2903					    bp->b_bufsize);
2904					bufspacewakeup();
2905					bp->b_bufsize = 0;
2906				}
2907				bp->b_flags &= ~B_MALLOC;
2908				newbsize = round_page(newbsize);
2909			}
2910			vm_hold_load_pages(
2911			    bp,
2912			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2913			    (vm_offset_t) bp->b_data + newbsize);
2914			if (origbuf) {
2915				bcopy(origbuf, bp->b_data, origbufsize);
2916				free(origbuf, M_BIOBUF);
2917			}
2918		}
2919	} else {
2920		int desiredpages;
2921
2922		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2923		desiredpages = (size == 0) ? 0 :
2924			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2925
2926		if (bp->b_flags & B_MALLOC)
2927			panic("allocbuf: VMIO buffer can't be malloced");
2928		/*
2929		 * Set B_CACHE initially if buffer is 0 length or will become
2930		 * 0-length.
2931		 */
2932		if (size == 0 || bp->b_bufsize == 0)
2933			bp->b_flags |= B_CACHE;
2934
2935		if (newbsize < bp->b_bufsize) {
2936			/*
2937			 * DEV_BSIZE aligned new buffer size is less than the
2938			 * DEV_BSIZE aligned existing buffer size.  Figure out
2939			 * if we have to remove any pages.
2940			 */
2941			if (desiredpages < bp->b_npages) {
2942				vm_page_t m;
2943
2944				VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2945				for (i = desiredpages; i < bp->b_npages; i++) {
2946					/*
2947					 * the page is not freed here -- it
2948					 * is the responsibility of
2949					 * vnode_pager_setsize
2950					 */
2951					m = bp->b_pages[i];
2952					KASSERT(m != bogus_page,
2953					    ("allocbuf: bogus page found"));
2954					while (vm_page_sleep_if_busy(m, TRUE,
2955					    "biodep"))
2956						continue;
2957
2958					bp->b_pages[i] = NULL;
2959					vm_page_lock(m);
2960					vm_page_lock_queues();
2961					vm_page_unwire(m, 0);
2962					vm_page_unlock_queues();
2963					vm_page_unlock(m);
2964				}
2965				VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2966				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2967				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2968				bp->b_npages = desiredpages;
2969			}
2970		} else if (size > bp->b_bcount) {
2971			/*
2972			 * We are growing the buffer, possibly in a
2973			 * byte-granular fashion.
2974			 */
2975			vm_object_t obj;
2976			vm_offset_t toff;
2977			vm_offset_t tinc;
2978
2979			/*
2980			 * Step 1, bring in the VM pages from the object,
2981			 * allocating them if necessary.  We must clear
2982			 * B_CACHE if these pages are not valid for the
2983			 * range covered by the buffer.
2984			 */
2985
2986			obj = bp->b_bufobj->bo_object;
2987
2988			VM_OBJECT_LOCK(obj);
2989			while (bp->b_npages < desiredpages) {
2990				vm_page_t m;
2991				vm_pindex_t pi;
2992
2993				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2994				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2995					/*
2996					 * note: must allocate system pages
2997					 * since blocking here could interfere
2998					 * with paging I/O, no matter which
2999					 * process we are.
3000					 */
3001					m = vm_page_alloc(obj, pi,
3002					    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
3003					    VM_ALLOC_WIRED);
3004					if (m == NULL) {
3005						atomic_add_int(&vm_pageout_deficit,
3006						    desiredpages - bp->b_npages);
3007						VM_OBJECT_UNLOCK(obj);
3008						VM_WAIT;
3009						VM_OBJECT_LOCK(obj);
3010					} else {
3011						if (m->valid == 0)
3012							bp->b_flags &= ~B_CACHE;
3013						bp->b_pages[bp->b_npages] = m;
3014						++bp->b_npages;
3015					}
3016					continue;
3017				}
3018
3019				/*
3020				 * We found a page.  If we have to sleep on it,
3021				 * retry because it might have gotten freed out
3022				 * from under us.
3023				 *
3024				 * We can only test VPO_BUSY here.  Blocking on
3025				 * m->busy might lead to a deadlock:
3026				 *
3027				 *  vm_fault->getpages->cluster_read->allocbuf
3028				 *
3029				 */
3030				if ((m->oflags & VPO_BUSY) != 0) {
3031					/*
3032					 * Reference the page before unlocking
3033					 * and sleeping so that the page daemon
3034					 * is less likely to reclaim it.
3035					 */
3036					vm_page_lock_queues();
3037					vm_page_flag_set(m, PG_REFERENCED);
3038					vm_page_sleep(m, "pgtblk");
3039					continue;
3040				}
3041
3042				/*
3043				 * We have a good page.
3044				 */
3045				vm_page_lock(m);
3046				vm_page_wire(m);
3047				vm_page_unlock(m);
3048				bp->b_pages[bp->b_npages] = m;
3049				++bp->b_npages;
3050			}
3051
3052			/*
3053			 * Step 2.  We've loaded the pages into the buffer,
3054			 * we have to figure out if we can still have B_CACHE
3055			 * set.  Note that B_CACHE is set according to the
3056			 * byte-granular range ( bcount and size ), not the
3057			 * aligned range ( newbsize ).
3058			 *
3059			 * The VM test is against m->valid, which is DEV_BSIZE
3060			 * aligned.  Needless to say, the validity of the data
3061			 * needs to also be DEV_BSIZE aligned.  Note that this
3062			 * fails with NFS if the server or some other client
3063			 * extends the file's EOF.  If our buffer is resized,
3064			 * B_CACHE may remain set! XXX
3065			 */
3066
3067			toff = bp->b_bcount;
3068			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3069
3070			while ((bp->b_flags & B_CACHE) && toff < size) {
3071				vm_pindex_t pi;
3072
3073				if (tinc > (size - toff))
3074					tinc = size - toff;
3075
3076				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3077				    PAGE_SHIFT;
3078
3079				vfs_buf_test_cache(
3080				    bp,
3081				    bp->b_offset,
3082				    toff,
3083				    tinc,
3084				    bp->b_pages[pi]
3085				);
3086				toff += tinc;
3087				tinc = PAGE_SIZE;
3088			}
3089			VM_OBJECT_UNLOCK(obj);
3090
3091			/*
3092			 * Step 3, fixup the KVM pmap.  Remember that
3093			 * bp->b_data is relative to bp->b_offset, but
3094			 * bp->b_offset may be offset into the first page.
3095			 */
3096
3097			bp->b_data = (caddr_t)
3098			    trunc_page((vm_offset_t)bp->b_data);
3099			pmap_qenter(
3100			    (vm_offset_t)bp->b_data,
3101			    bp->b_pages,
3102			    bp->b_npages
3103			);
3104
3105			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
3106			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
3107		}
3108	}
3109	if (newbsize < bp->b_bufsize)
3110		bufspacewakeup();
3111	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
3112	bp->b_bcount = size;		/* requested buffer size	*/
3113	return 1;
3114}
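/*
 * Sizing example for the VMIO path above (PAGE_SIZE = 4096 and a
 * page-aligned b_offset assumed): allocbuf(bp, 6000) rounds the
 * request up to newbsize = 6144 at DEV_BSIZE granularity and computes
 * desiredpages = 2, since 6144 bytes span two pages.  Growing from a
 * one-page buffer therefore wires in one more page, re-runs the
 * B_CACHE validity test over bytes [b_bcount, 6000), and re-enters
 * all pages into the buffer's KVA window with pmap_qenter().
 */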
3115
3116void
3117biodone(struct bio *bp)
3118{
3119	struct mtx *mtxp;
3120	void (*done)(struct bio *);
3121
3122	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3123	mtx_lock(mtxp);
3124	bp->bio_flags |= BIO_DONE;
3125	done = bp->bio_done;
3126	if (done == NULL)
3127		wakeup(bp);
3128	mtx_unlock(mtxp);
3129	if (done != NULL)
3130		done(bp);
3131}
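/*
 * The two completion styles biodone() supports, sketched with
 * hypothetical caller code:
 *
 *	Asynchronous: point bip->bio_done at a callback before issuing
 *	the bio; biodone() calls it directly, without sleeping.
 *
 *	Synchronous: leave bio_done NULL and pair the request with
 *	biowait():
 *
 *		bip->bio_done = NULL;
 *		... issue bip ...
 *		error = biowait(bip, "biowt");
 *
 * With bio_done NULL, biodone() merely sets BIO_DONE and wakeup()s
 * the bio, which is exactly the event biowait() msleep()s on.
 */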
3132
3133/*
3134 * Wait for a BIO to finish.
3135 *
3136 * XXX: resort to a timeout for now.  The optimal locking (if any) for this
3137 * case is not yet clear.
3138 */
3139int
3140biowait(struct bio *bp, const char *wchan)
3141{
3142	struct mtx *mtxp;
3143
3144	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3145	mtx_lock(mtxp);
3146	while ((bp->bio_flags & BIO_DONE) == 0)
3147		msleep(bp, mtxp, PRIBIO, wchan, hz / 10);
3148	mtx_unlock(mtxp);
3149	if (bp->bio_error != 0)
3150		return (bp->bio_error);
3151	if (!(bp->bio_flags & BIO_ERROR))
3152		return (0);
3153	return (EIO);
3154}
3155
3156void
3157biofinish(struct bio *bp, struct devstat *stat, int error)
3158{
3159
3160	if (error) {
3161		bp->bio_error = error;
3162		bp->bio_flags |= BIO_ERROR;
3163	}
3164	if (stat != NULL)
3165		devstat_end_transaction_bio(stat, bp);
3166	biodone(bp);
3167}
3168
3169/*
3170 *	bufwait:
3171 *
3172 *	Wait for buffer I/O completion, returning error status.  The buffer
3173 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3174 *	error and cleared.
3175 */
3176int
3177bufwait(struct buf *bp)
3178{
3179	if (bp->b_iocmd == BIO_READ)
3180		bwait(bp, PRIBIO, "biord");
3181	else
3182		bwait(bp, PRIBIO, "biowr");
3183	if (bp->b_flags & B_EINTR) {
3184		bp->b_flags &= ~B_EINTR;
3185		return (EINTR);
3186	}
3187	if (bp->b_ioflags & BIO_ERROR) {
3188		return (bp->b_error ? bp->b_error : EIO);
3189	} else {
3190		return (0);
3191	}
3192}
3193
3194/*
3195 * Call back function from struct bio back up to struct buf.
3196 */
3197static void
3198bufdonebio(struct bio *bip)
3199{
3200	struct buf *bp;
3201
3202	bp = bip->bio_caller2;
3203	bp->b_resid = bp->b_bcount - bip->bio_completed;
3204	bp->b_resid = bip->bio_resid;	/* XXX: remove */
3205	bp->b_ioflags = bip->bio_flags;
3206	bp->b_error = bip->bio_error;
3207	if (bp->b_error)
3208		bp->b_ioflags |= BIO_ERROR;
3209	bufdone(bp);
3210	g_destroy_bio(bip);
3211}
3212
3213void
3214dev_strategy(struct cdev *dev, struct buf *bp)
3215{
3216	struct cdevsw *csw;
3217	struct bio *bip;
3218
3219	if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
3220		panic("b_iocmd botch");
3221	for (;;) {
3222		bip = g_new_bio();
3223		if (bip != NULL)
3224			break;
3225		/* Try again later */
3226		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3227	}
3228	bip->bio_cmd = bp->b_iocmd;
3229	bip->bio_offset = bp->b_iooffset;
3230	bip->bio_length = bp->b_bcount;
3231	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3232	bip->bio_data = bp->b_data;
3233	bip->bio_done = bufdonebio;
3234	bip->bio_caller2 = bp;
3235	bip->bio_dev = dev;
3236	KASSERT(dev->si_refcount > 0,
3237	    ("dev_strategy on un-referenced struct cdev *(%s)",
3238	    devtoname(dev)));
3239	csw = dev_refthread(dev);
3240	if (csw == NULL) {
3241		g_destroy_bio(bip);
3242		bp->b_error = ENXIO;
3243		bp->b_ioflags = BIO_ERROR;
3244		bufdone(bp);
3245		return;
3246	}
3247	(*csw->d_strategy)(bip);
3248	dev_relthread(dev);
3249}
3250
3251/*
3252 *	bufdone:
3253 *
3254 *	Finish I/O on a buffer, optionally calling a completion function.
3255 *	This is usually called from an interrupt so process blocking is
3256 *	not allowed.
3257 *
3258 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3259 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3260 *	assuming B_INVAL is clear.
3261 *
3262 *	For the VMIO case, we set B_CACHE if the op was a read and no
3263 *	read error occured, or if the op was a write.  B_CACHE is never
3264 *	set if the buffer is invalid or otherwise uncacheable.
3265 *
3266 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3267 *	initiator to leave B_INVAL set to brelse the buffer out of existence
3268 *	in the biodone routine.
3269 */
3270void
3271bufdone(struct buf *bp)
3272{
3273	struct bufobj *dropobj;
3274	void    (*biodone)(struct buf *);
3275
3276	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3277	dropobj = NULL;
3278
3279	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3280	BUF_ASSERT_HELD(bp);
3281
3282	runningbufwakeup(bp);
3283	if (bp->b_iocmd == BIO_WRITE)
3284		dropobj = bp->b_bufobj;
3285	/* call optional completion function if requested */
3286	if (bp->b_iodone != NULL) {
3287		biodone = bp->b_iodone;
3288		bp->b_iodone = NULL;
3289		(*biodone) (bp);
3290		if (dropobj)
3291			bufobj_wdrop(dropobj);
3292		return;
3293	}
3294
3295	bufdone_finish(bp);
3296
3297	if (dropobj)
3298		bufobj_wdrop(dropobj);
3299}
3300
3301void
3302bufdone_finish(struct buf *bp)
3303{
3304	BUF_ASSERT_HELD(bp);
3305
3306	if (!LIST_EMPTY(&bp->b_dep))
3307		buf_complete(bp);
3308
3309	if (bp->b_flags & B_VMIO) {
3310		int i;
3311		vm_ooffset_t foff;
3312		vm_page_t m;
3313		vm_object_t obj;
3314		int iosize;
3315		struct vnode *vp = bp->b_vp;
3316
3317		obj = bp->b_bufobj->bo_object;
3318
3319#if defined(VFS_BIO_DEBUG)
3320		mp_fixme("usecount and vflag accessed without locks.");
3321		if (vp->v_usecount == 0) {
3322			panic("biodone: zero vnode ref count");
3323		}
3324
3325		KASSERT(vp->v_object != NULL,
3326			("biodone: vnode %p has no vm_object", vp));
3327#endif
3328
3329		foff = bp->b_offset;
3330		KASSERT(bp->b_offset != NOOFFSET,
3331		    ("biodone: no buffer offset"));
3332
3333		VM_OBJECT_LOCK(obj);
3334#if defined(VFS_BIO_DEBUG)
3335		if (obj->paging_in_progress < bp->b_npages) {
3336			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
3337			    obj->paging_in_progress, bp->b_npages);
3338		}
3339#endif
3340
3341		/*
3342		 * Set B_CACHE if the op was a normal read and no error
3343		 * occurred.  B_CACHE is set for writes in the b*write()
3344		 * routines.
3345		 */
3346		iosize = bp->b_bcount - bp->b_resid;
3347		if (bp->b_iocmd == BIO_READ &&
3348		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3349		    !(bp->b_ioflags & BIO_ERROR)) {
3350			bp->b_flags |= B_CACHE;
3351		}
3352		for (i = 0; i < bp->b_npages; i++) {
3353			int bogusflag = 0;
3354			int resid;
3355
3356			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3357			if (resid > iosize)
3358				resid = iosize;
3359
3360			/*
3361			 * cleanup bogus pages, restoring the originals
3362			 */
3363			m = bp->b_pages[i];
3364			if (m == bogus_page) {
3365				bogusflag = 1;
3366				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3367				if (m == NULL)
3368					panic("biodone: page disappeared!");
3369				bp->b_pages[i] = m;
3370				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3371				    bp->b_pages, bp->b_npages);
3372			}
3373#if defined(VFS_BIO_DEBUG)
3374			if (OFF_TO_IDX(foff) != m->pindex) {
3375				printf(
3376"biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
3377				    (intmax_t)foff, (uintmax_t)m->pindex);
3378			}
3379#endif
3380
3381			/*
3382			 * In the write case, the valid and clean bits are
3383			 * already changed correctly ( see bdwrite() ), so we
3384			 * only need to do this here in the read case.
3385			 */
3386			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3387				KASSERT((m->dirty & vm_page_bits(foff &
3388				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3389				    " page %p has unexpected dirty bits", m));
3390				vfs_page_set_valid(bp, foff, m);
3391			}
3392
3393			/*
3394			 * When debugging new filesystems or buffer I/O methods, this
3395			 * is the most common error that pops up.  If you see this, you
3396			 * have not set the page busy flag correctly!!!
3397			 */
3398			if (m->busy == 0) {
3399				printf("biodone: page busy < 0, "
3400				    "pindex: %d, foff: 0x(%x,%x), "
3401				    "resid: %d, index: %d\n",
3402				    (int) m->pindex, (int)(foff >> 32),
3403						(int) foff & 0xffffffff, resid, i);
3404				if (!vn_isdisk(vp, NULL))
3405					printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
3406					    (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
3407					    (intmax_t) bp->b_lblkno,
3408					    bp->b_flags, bp->b_npages);
3409				else
3410					printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
3411					    (intmax_t) bp->b_lblkno,
3412					    bp->b_flags, bp->b_npages);
3413				printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
3414				    (u_long)m->valid, (u_long)m->dirty,
3415				    m->wire_count);
3416				panic("biodone: page busy < 0\n");
3417			}
3418			vm_page_io_finish(m);
3419			vm_object_pip_subtract(obj, 1);
3420			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3421			iosize -= resid;
3422		}
3423		vm_object_pip_wakeupn(obj, 0);
3424		VM_OBJECT_UNLOCK(obj);
3425	}
3426
3427	/*
3428	 * For asynchronous completions, release the buffer now. The brelse
3429	 * will do a wakeup there if necessary - so no need to do a wakeup
3430	 * here in the async case. The sync case always needs to do a wakeup.
3431	 */
3432
3433	if (bp->b_flags & B_ASYNC) {
3434		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3435			brelse(bp);
3436		else
3437			bqrelse(bp);
3438	} else
3439		bdone(bp);
3440}
3441
3442/*
3443 * This routine is called in lieu of iodone in the case of
3444 * incomplete I/O.  This keeps the busy status for pages
3445 * consistent.
3446 */
3447void
3448vfs_unbusy_pages(struct buf *bp)
3449{
3450	int i;
3451	vm_object_t obj;
3452	vm_page_t m;
3453
3454	runningbufwakeup(bp);
3455	if (!(bp->b_flags & B_VMIO))
3456		return;
3457
3458	obj = bp->b_bufobj->bo_object;
3459	VM_OBJECT_LOCK(obj);
3460	for (i = 0; i < bp->b_npages; i++) {
3461		m = bp->b_pages[i];
3462		if (m == bogus_page) {
3463			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3464			if (!m)
3465				panic("vfs_unbusy_pages: page missing\n");
3466			bp->b_pages[i] = m;
3467			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3468			    bp->b_pages, bp->b_npages);
3469		}
3470		vm_object_pip_subtract(obj, 1);
3471		vm_page_io_finish(m);
3472	}
3473	vm_object_pip_wakeupn(obj, 0);
3474	VM_OBJECT_UNLOCK(obj);
3475}
3476
3477/*
3478 * vfs_page_set_valid:
3479 *
3480 *	Set the valid bits in a page based on the supplied offset.   The
3481 *	range is restricted to the buffer's size.
3482 *
3483 *	This routine is typically called after a read completes.
3484 */
3485static void
3486vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3487{
3488	vm_ooffset_t eoff;
3489
3490	/*
3491	 * Compute the end offset, eoff, such that [off, eoff) does not span a
3492	 * page boundary and eoff is not greater than the end of the buffer.
3493	 * The end of the buffer, in this case, is our file EOF, not the
3494	 * allocation size of the buffer.
3495	 */
3496	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
3497	if (eoff > bp->b_offset + bp->b_bcount)
3498		eoff = bp->b_offset + bp->b_bcount;
3499
3500	/*
3501	 * Set valid range.  This is typically the entire buffer and thus the
3502	 * entire page.
3503	 */
3504	if (eoff > off)
3505		vm_page_set_valid(m, off & PAGE_MASK, eoff - off);
3506}
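/*
 * Clamp example (assumed values): for an off that is 0xe00 bytes into
 * its page, eoff first rounds up to the next page boundary; if the
 * buffer's EOF falls 0x100 bytes past off, eoff is clamped there and
 * vm_page_set_valid() marks only bytes [0xe00, 0xf00) of the page
 * valid, never the slack beyond EOF.
 */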
3507
3508/*
3509 * vfs_page_set_validclean:
3510 *
3511 *	Set the valid bits and clear the dirty bits in a page based on the
3512 *	supplied offset.   The range is restricted to the buffer's size.
3513 */
3514static void
3515vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3516{
3517	vm_ooffset_t soff, eoff;
3518
3519	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3520	/*
3521	 * Start and end offsets in buffer.  eoff - soff may not cross a
3522	 * page boundary or cross the end of the buffer.  The end of the
3523	 * buffer, in this case, is our file EOF, not the allocation size
3524	 * of the buffer.
3525	 */
3526	soff = off;
3527	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3528	if (eoff > bp->b_offset + bp->b_bcount)
3529		eoff = bp->b_offset + bp->b_bcount;
3530
3531	/*
3532	 * Set valid range.  This is typically the entire buffer and thus the
3533	 * entire page.
3534	 */
3535	if (eoff > soff) {
3536		vm_page_set_validclean(
3537		    m,
3538		   (vm_offset_t) (soff & PAGE_MASK),
3539		   (vm_offset_t) (eoff - soff)
3540		);
3541	}
3542}
3543
3544/*
3545 * This routine is called before a device strategy routine.
3546 * It is used to tell the VM system that paging I/O is in
3547 * progress, and treat the pages associated with the buffer
3548 * almost as being VPO_BUSY.  Also the object paging_in_progress
3549 * flag is handled to make sure that the object doesn't become
3550 * inconsistant.
3551 * inconsistent.
3552 * Since I/O has not been initiated yet, certain buffer flags
3553 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3554 * and should be ignored.
3555 */
3556void
3557vfs_busy_pages(struct buf *bp, int clear_modify)
3558{
3559	int i, bogus;
3560	vm_object_t obj;
3561	vm_ooffset_t foff;
3562	vm_page_t m;
3563
3564	if (!(bp->b_flags & B_VMIO))
3565		return;
3566
3567	obj = bp->b_bufobj->bo_object;
3568	foff = bp->b_offset;
3569	KASSERT(bp->b_offset != NOOFFSET,
3570	    ("vfs_busy_pages: no buffer offset"));
3571	VM_OBJECT_LOCK(obj);
3572	if (bp->b_bufsize != 0)
3573		vfs_setdirty_locked_object(bp);
3574retry:
3575	for (i = 0; i < bp->b_npages; i++) {
3576		m = bp->b_pages[i];
3577
3578		if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
3579			goto retry;
3580	}
3581	bogus = 0;
3582	if (clear_modify)
3583		vm_page_lock_queues();
3584	for (i = 0; i < bp->b_npages; i++) {
3585		m = bp->b_pages[i];
3586
3587		if ((bp->b_flags & B_CLUSTER) == 0) {
3588			vm_object_pip_add(obj, 1);
3589			vm_page_io_start(m);
3590		}
3591		/*
3592		 * When readying a buffer for a read ( i.e
3593		 * clear_modify == 0 ), it is important to do
3594		 * bogus_page replacement for valid pages in
3595		 * partially instantiated buffers.  Partially
3596		 * instantiated buffers can, in turn, occur when
3597		 * reconstituting a buffer from its VM backing store
3598		 * base.  We only have to do this if B_CACHE is
3599		 * clear ( which causes the I/O to occur in the
3600		 * first place ).  The replacement prevents the read
3601		 * I/O from overwriting potentially dirty VM-backed
3602		 * pages.  XXX bogus page replacement is, uh, bogus.
3603		 * It may not work properly with small-block devices.
3604		 * We need to find a better way.
3605		 */
3606		if (clear_modify) {
3607			pmap_remove_write(m);
3608			vfs_page_set_validclean(bp, foff, m);
3609		} else if (m->valid == VM_PAGE_BITS_ALL &&
3610		    (bp->b_flags & B_CACHE) == 0) {
3611			bp->b_pages[i] = bogus_page;
3612			bogus++;
3613		}
3614		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3615	}
3616	if (clear_modify)
3617		vm_page_unlock_queues();
3618	VM_OBJECT_UNLOCK(obj);
3619	if (bogus)
3620		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3621		    bp->b_pages, bp->b_npages);
3622}
3623
3624/*
3625 * Tell the VM system that the pages associated with this buffer
3626 * are clean.  This is used for delayed writes where the data is
3627 * going to go to disk eventually without additional VM intervention.
3628 *
3629 * Note that while we only really need to clean through to b_bcount, we
3630 * just go ahead and clean through to b_bufsize.
3631 */
3632static void
3633vfs_clean_pages(struct buf *bp)
3634{
3635	int i;
3636	vm_ooffset_t foff, noff, eoff;
3637	vm_page_t m;
3638
3639	if (!(bp->b_flags & B_VMIO))
3640		return;
3641
3642	foff = bp->b_offset;
3643	KASSERT(bp->b_offset != NOOFFSET,
3644	    ("vfs_clean_pages: no buffer offset"));
3645	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3646	vm_page_lock_queues();
3647	for (i = 0; i < bp->b_npages; i++) {
3648		m = bp->b_pages[i];
3649		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3650		eoff = noff;
3651
3652		if (eoff > bp->b_offset + bp->b_bufsize)
3653			eoff = bp->b_offset + bp->b_bufsize;
3654		vfs_page_set_validclean(bp, foff, m);
3655		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3656		foff = noff;
3657	}
3658	vm_page_unlock_queues();
3659	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3660}
3661
3662/*
3663 *	vfs_bio_set_valid:
3664 *
3665 *	Set the range within the buffer to valid.  The range is
3666 *	relative to the beginning of the buffer, b_offset.  Note that
3667 *	b_offset itself may be offset from the beginning of the first
3668 *	page.
3669 */
3670void
3671vfs_bio_set_valid(struct buf *bp, int base, int size)
3672{
3673	int i, n;
3674	vm_page_t m;
3675
3676	if (!(bp->b_flags & B_VMIO))
3677		return;
3678
3679	/*
3680	 * Fixup base to be relative to beginning of first page.
3681	 * Set initial n to be the maximum number of bytes in the
3682	 * first page that can be validated.
3683	 */
3684	base += (bp->b_offset & PAGE_MASK);
3685	n = PAGE_SIZE - (base & PAGE_MASK);
3686
3687	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3688	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3689		m = bp->b_pages[i];
3690		if (n > size)
3691			n = size;
3692		vm_page_set_valid(m, base & PAGE_MASK, n);
3693		base += n;
3694		size -= n;
3695		n = PAGE_SIZE;
3696	}
3697	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3698}
3699
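/*
 * Editor's sketch (compiled out, function name invented): validating
 * only the bytes an I/O actually produced, e.g. in a done handler for
 * a short read, using b_bcount - b_resid as the valid length.
 */
#ifdef notdef
static void
example_short_read_done(struct buf *bp)
{

	if (bp->b_resid > 0 && bp->b_resid < bp->b_bcount)
		vfs_bio_set_valid(bp, 0, bp->b_bcount - bp->b_resid);
}
#endif
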
3700/*
3701 *	vfs_bio_clrbuf:
3702 *
3703 *	If the specified buffer is a non-VMIO buffer, clear the entire
3704 *	buffer.  If the specified buffer is a VMIO buffer, clear and
3705 *	validate only the previously invalid portions of the buffer.
3706 *	This routine essentially fakes an I/O, so we need to clear
3707 *	BIO_ERROR and B_INVAL.
3708 *
3709 *	Note that while we only theoretically need to clear through b_bcount,
3710 *	we go ahead and clear through b_bufsize.
3711 */
3712void
3713vfs_bio_clrbuf(struct buf *bp)
3714{
3715	int i, j, mask;
3716	caddr_t sa, ea;
3717
3718	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
3719		clrbuf(bp);
3720		return;
3721	}
3722	bp->b_flags &= ~B_INVAL;
3723	bp->b_ioflags &= ~BIO_ERROR;
3724	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3725	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3726	    (bp->b_offset & PAGE_MASK) == 0) {
3727		if (bp->b_pages[0] == bogus_page)
3728			goto unlock;
3729		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3730		VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
3731		if ((bp->b_pages[0]->valid & mask) == mask)
3732			goto unlock;
3733		if ((bp->b_pages[0]->valid & mask) == 0) {
3734			bzero(bp->b_data, bp->b_bufsize);
3735			bp->b_pages[0]->valid |= mask;
3736			goto unlock;
3737		}
3738	}
3739	ea = sa = bp->b_data;
3740	for (i = 0; i < bp->b_npages; i++, sa = ea) {
3741		ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3742		ea = (caddr_t)(vm_offset_t)ulmin(
3743		    (u_long)(vm_offset_t)ea,
3744		    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3745		if (bp->b_pages[i] == bogus_page)
3746			continue;
3747		j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3748		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3749		VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
3750		if ((bp->b_pages[i]->valid & mask) == mask)
3751			continue;
3752		if ((bp->b_pages[i]->valid & mask) == 0)
3753			bzero(sa, ea - sa);
3754		else {
3755			for (; sa < ea; sa += DEV_BSIZE, j++) {
3756				if ((bp->b_pages[i]->valid & (1 << j)) == 0)
3757					bzero(sa, DEV_BSIZE);
3758			}
3759		}
3760		bp->b_pages[i]->valid |= mask;
3761	}
3762unlock:
3763	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3764	bp->b_resid = 0;
3765}
3766
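/*
 * Editor's sketch (compiled out, function name invented): a filesystem
 * allocating a brand-new block has no on-disk contents worth reading,
 * so it zeroes the invalid portions instead of doing I/O; compare the
 * vfs_bio_clrbuf() calls in ffs_balloc().
 */
#ifdef notdef
static void
example_alloc_block(struct vnode *vp, daddr_t lblkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, lblkno, size, 0, 0, 0);
	vfs_bio_clrbuf(bp);	/* fake the read: zero and validate */
	bdwrite(bp);		/* schedule the new block for writing */
}
#endif
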
3767/*
3768 * vm_hold_load_pages and vm_hold_free_pages get pages into
3769 * a buffer's address space.  The pages are anonymous and are
3770 * not associated with a file object.
3771 */
3772static void
3773vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3774{
3775	vm_offset_t pg;
3776	vm_page_t p;
3777	int index;
3778
3779	to = round_page(to);
3780	from = round_page(from);
3781	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3782
3783	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3784tryagain:
3785		/*
3786		 * Note: we must allocate system pages since blocking
3787		 * here could interfere with paging I/O, no matter
3788		 * which process we are running as.
3789		 */
3790		p = vm_page_alloc(NULL, pg >> PAGE_SHIFT, VM_ALLOC_NOOBJ |
3791		    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
3792		if (p == NULL) {
3793			atomic_add_int(&vm_pageout_deficit,
3794			    (to - pg) >> PAGE_SHIFT);
3795			VM_WAIT;
3796			goto tryagain;
3797		}
3798		pmap_qenter(pg, &p, 1);
3799		bp->b_pages[index] = p;
3800	}
3801	bp->b_npages = index;
3802}
3803
3804/* Return pages associated with this buf to the vm system */
3805static void
3806vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3807{
3808	vm_offset_t pg;
3809	vm_page_t p;
3810	int index, newnpages;
3811
3812	from = round_page(from);
3813	to = round_page(to);
3814	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3815
3816	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3817		p = bp->b_pages[index];
3818		if (p && (index < bp->b_npages)) {
3819			if (p->busy) {
3820				printf(
3821			    "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
3822				    (intmax_t)bp->b_blkno,
3823				    (intmax_t)bp->b_lblkno);
3824			}
3825			bp->b_pages[index] = NULL;
3826			pmap_qremove(pg, 1);
3827			p->wire_count--;
3828			vm_page_free(p);
3829			atomic_subtract_int(&cnt.v_wire_count, 1);
3830		}
3831	}
3832	bp->b_npages = newnpages;
3833}
3834
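/*
 * Editor's sketch (compiled out, function name invented): how a
 * non-VMIO buffer grows and shrinks the anonymous pages behind
 * b_data, in the manner of allocbuf().
 */
#ifdef notdef
static void
example_resize_backing(struct buf *bp, int newbsize)
{

	if (newbsize > bp->b_bufsize)
		vm_hold_load_pages(bp,
		    (vm_offset_t)bp->b_data + bp->b_bufsize,
		    (vm_offset_t)bp->b_data + newbsize);
	else
		vm_hold_free_pages(bp,
		    (vm_offset_t)bp->b_data + newbsize,
		    (vm_offset_t)bp->b_data + bp->b_bufsize);
	bp->b_bufsize = newbsize;
}
#endif
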
3835/*
3836 * Map an IO request into kernel virtual address space.
3837 *
3838 * All requests are (re)mapped into kernel VA space.
3839 * Notice that we use b_bufsize for the size of the buffer
3840 * to be mapped.  b_bcount might be modified by the driver.
3841 *
3842 * Note that even if the caller determines that the address space should
3843 * be valid, a race or a smaller file mapped into a larger space may
3844 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
3845 * check the return value.
3846 */
3847int
3848vmapbuf(struct buf *bp)
3849{
3850	caddr_t addr, kva;
3851	vm_prot_t prot;
3852	int pidx, i;
3853	struct vm_page *m;
3854	struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
3855
3856	if (bp->b_bufsize < 0)
3857		return (-1);
3858	prot = VM_PROT_READ;
3859	if (bp->b_iocmd == BIO_READ)
3860		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
3861	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
3862	     addr < bp->b_data + bp->b_bufsize;
3863	     addr += PAGE_SIZE, pidx++) {
3864		/*
3865		 * Do the vm_fault if needed; do the copy-on-write thing
3866		 * when reading stuff off device into memory.
3867		 *
3868		 * NOTE! Must use pmap_extract_and_hold() because addr may be in
3869		 * the userland address space; kextract is only guaranteed to
3870		 * work for the kernel address space (see: sparc64 port).
3871		 */
3872retry:
3873		if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
3874		    prot) < 0) {
3875			for (i = 0; i < pidx; ++i) {
3876				vm_page_lock(bp->b_pages[i]);
3877				vm_page_unhold(bp->b_pages[i]);
3878				vm_page_unlock(bp->b_pages[i]);
3879				bp->b_pages[i] = NULL;
3880			}
3881			return (-1);
3882		}
3883		m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
3884		if (m == NULL)
3885			goto retry;
3886		bp->b_pages[pidx] = m;
3887	}
3888	if (pidx > btoc(MAXPHYS))
3889		panic("vmapbuf: mapped more than MAXPHYS");
3890	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
3891
3892	kva = bp->b_saveaddr;
3893	bp->b_npages = pidx;
3894	bp->b_saveaddr = bp->b_data;
3895	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
3896	return (0);
3897}
3898
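/*
 * Editor's sketch (compiled out, function name invented): the mandatory
 * return-value check described above, in the style of physio().  On
 * success the user pages are faulted in and held, and bp->b_data points
 * at a kernel mapping of them until vunmapbuf() is called.
 */
#ifdef notdef
static int
example_map_user_buffer(struct buf *bp)
{

	if (vmapbuf(bp) < 0)
		return (EFAULT);	/* the user mapping went away */
	/* ... perform the transfer through bp->b_data ... */
	vunmapbuf(bp);
	return (0);
}
#endif
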
3899/*
3900 * Free the I/O map PTEs associated with this I/O operation.
3901 * We also invalidate the TLB entries and restore the original b_data.
3902 */
3903void
3904vunmapbuf(struct buf *bp)
3905{
3906	int pidx;
3907	int npages;
3908
3909	npages = bp->b_npages;
3910	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
3911	for (pidx = 0; pidx < npages; pidx++) {
3912		vm_page_lock(bp->b_pages[pidx]);
3913		vm_page_unhold(bp->b_pages[pidx]);
3914		vm_page_unlock(bp->b_pages[pidx]);
3915	}
3916
3917	bp->b_data = bp->b_saveaddr;
3918}
3919
3920void
3921bdone(struct buf *bp)
3922{
3923	struct mtx *mtxp;
3924
3925	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3926	mtx_lock(mtxp);
3927	bp->b_flags |= B_DONE;
3928	wakeup(bp);
3929	mtx_unlock(mtxp);
3930}
3931
3932void
3933bwait(struct buf *bp, u_char pri, const char *wchan)
3934{
3935	struct mtx *mtxp;
3936
3937	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3938	mtx_lock(mtxp);
3939	while ((bp->b_flags & B_DONE) == 0)
3940		msleep(bp, mtxp, pri, wchan, 0);
3941	mtx_unlock(mtxp);
3942}
3943
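/*
 * Editor's sketch (compiled out, function names invented): the
 * bdone()/bwait() handshake for I/O that bypasses bufdone().  The
 * waiter sleeps until B_DONE is set; the completion side sets it and
 * issues the wakeup under the same pool mutex, so no wakeup is lost.
 */
#ifdef notdef
static void
example_issue_and_wait(struct buf *bp)
{

	bp->b_flags &= ~B_DONE;
	/* ... hand bp to the transport ... */
	bwait(bp, PRIBIO, "exwait");
}

static void
example_transport_done(struct buf *bp)
{

	bdone(bp);
}
#endif
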
3944int
3945bufsync(struct bufobj *bo, int waitfor)
3946{
3947
3948	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
3949}
3950
3951void
3952bufstrategy(struct bufobj *bo, struct buf *bp)
3953{
3954	int i = 0;
3955	struct vnode *vp;
3956
3957	vp = bp->b_vp;
3958	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
3959	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
3960	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
3961	i = VOP_STRATEGY(vp, bp);
3962	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
3963}
3964
3965void
3966bufobj_wrefl(struct bufobj *bo)
3967{
3968
3969	KASSERT(bo != NULL, ("NULL bo in bufobj_wrefl"));
3970	ASSERT_BO_LOCKED(bo);
3971	bo->bo_numoutput++;
3972}
3973
3974void
3975bufobj_wref(struct bufobj *bo)
3976{
3977
3978	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
3979	BO_LOCK(bo);
3980	bo->bo_numoutput++;
3981	BO_UNLOCK(bo);
3982}
3983
3984void
3985bufobj_wdrop(struct bufobj *bo)
3986{
3987
3988	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
3989	BO_LOCK(bo);
3990	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
3991	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
3992		bo->bo_flag &= ~BO_WWAIT;
3993		wakeup(&bo->bo_numoutput);
3994	}
3995	BO_UNLOCK(bo);
3996}
3997
3998int
3999bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4000{
4001	int error;
4002
4003	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4004	ASSERT_BO_LOCKED(bo);
4005	error = 0;
4006	while (bo->bo_numoutput) {
4007		bo->bo_flag |= BO_WWAIT;
4008		error = msleep(&bo->bo_numoutput, BO_MTX(bo),
4009		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4010		if (error)
4011			break;
4012	}
4013	return (error);
4014}
4015
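/*
 * Editor's sketch (compiled out, function name invented): draining the
 * writes in flight on a bufobj.  Writers are counted by bufobj_wref()
 * when the I/O starts and bufobj_wdrop() when it completes; a caller
 * holding the bufobj lock can then wait for the count to reach zero.
 */
#ifdef notdef
static int
example_drain_writes(struct bufobj *bo)
{
	int error;

	BO_LOCK(bo);
	error = bufobj_wwait(bo, 0, 0);
	BO_UNLOCK(bo);
	return (error);
}
#endif
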
4016void
4017bpin(struct buf *bp)
4018{
4019	struct mtx *mtxp;
4020
4021	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4022	mtx_lock(mtxp);
4023	bp->b_pin_count++;
4024	mtx_unlock(mtxp);
4025}
4026
4027void
4028bunpin(struct buf *bp)
4029{
4030	struct mtx *mtxp;
4031
4032	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4033	mtx_lock(mtxp);
4034	if (--bp->b_pin_count == 0)
4035		wakeup(bp);
4036	mtx_unlock(mtxp);
4037}
4038
4039void
4040bunpin_wait(struct buf *bp)
4041{
4042	struct mtx *mtxp;
4043
4044	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4045	mtx_lock(mtxp);
4046	while (bp->b_pin_count > 0)
4047		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4048	mtx_unlock(mtxp);
4049}
4050
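/*
 * Editor's sketch (compiled out, function name invented): one plausible
 * use of the pin count.  A pinned buffer holds off bufwrite(), which
 * calls bunpin_wait() before starting the write, so the pin holder can
 * rely on the buffer contents staying put for the duration.
 */
#ifdef notdef
static void
example_peek_pinned(struct buf *bp)
{

	bpin(bp);
	/* ... read bp->b_data while the write is held off ... */
	bunpin(bp);		/* wakes any thread in bunpin_wait() */
}
#endif
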
4051#include "opt_ddb.h"
4052#ifdef DDB
4053#include <ddb/ddb.h>
4054
4055/* DDB command to show buffer data */
4056DB_SHOW_COMMAND(buffer, db_show_buffer)
4057{
4058	/* get args */
4059	struct buf *bp = (struct buf *)addr;
4060
4061	if (!have_addr) {
4062		db_printf("usage: show buffer <addr>\n");
4063		return;
4064	}
4065
4066	db_printf("buf at %p\n", bp);
4067	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
4068	db_printf(
4069	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4070	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_dep = %p\n",
4071	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4072	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4073	    bp->b_dep.lh_first);
4074	if (bp->b_npages) {
4075		int i;
4076		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4077		for (i = 0; i < bp->b_npages; i++) {
4078			vm_page_t m;
4079			m = bp->b_pages[i];
4080			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4081			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4082			if ((i + 1) < bp->b_npages)
4083				db_printf(",");
4084		}
4085		db_printf("\n");
4086	}
4087	db_printf(" ");
4088	lockmgr_printinfo(&bp->b_lock);
4089}
4090
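/*
 * Editor's note: from the DDB prompt the command above is invoked as,
 * for example (address illustrative):
 *
 *	db> show buffer 0xc4f2d000
 *
 * which prints the buffer's flags, sizes, block numbers, and backing
 * pages, followed by its lock state.
 */
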
4091DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4092{
4093	struct buf *bp;
4094	int i;
4095
4096	for (i = 0; i < nbuf; i++) {
4097		bp = &buf[i];
4098		if (BUF_ISLOCKED(bp)) {
4099			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4100			db_printf("\n");
4101		}
4102	}
4103}
4104
4105DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4106{
4107	struct vnode *vp;
4108	struct buf *bp;
4109
4110	if (!have_addr) {
4111		db_printf("usage: show vnodebufs <addr>\n");
4112		return;
4113	}
4114	vp = (struct vnode *)addr;
4115	db_printf("Clean buffers:\n");
4116	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4117		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4118		db_printf("\n");
4119	}
4120	db_printf("Dirty buffers:\n");
4121	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4122		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4123		db_printf("\n");
4124	}
4125}
4126#endif /* DDB */
4127