vfs_bio.c revision 236465
1/*-
2 * Copyright (c) 2004 Poul-Henning Kamp
3 * Copyright (c) 1994,1997 John S. Dyson
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28/*
29 * This file contains a new buffer I/O scheme implementing a coherent
30 * VM object and buffer cache scheme.  Pains have been taken to make
31 * sure that the performance degradation associated with schemes such
32 * as this is not realized.
33 *
34 * Author:  John S. Dyson
35 * Significant help during the development and debugging phases
36 * had been provided by David Greenman, also of the FreeBSD core team.
37 *
38 * see man buf(9) for more info.
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 236465 2012-06-02 18:44:40Z kib $");
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/bio.h>
47#include <sys/conf.h>
48#include <sys/buf.h>
49#include <sys/devicestat.h>
50#include <sys/eventhandler.h>
51#include <sys/fail.h>
52#include <sys/limits.h>
53#include <sys/lock.h>
54#include <sys/malloc.h>
55#include <sys/mount.h>
56#include <sys/mutex.h>
57#include <sys/kernel.h>
58#include <sys/kthread.h>
59#include <sys/proc.h>
60#include <sys/resourcevar.h>
61#include <sys/sysctl.h>
62#include <sys/vmmeter.h>
63#include <sys/vnode.h>
64#include <geom/geom.h>
65#include <vm/vm.h>
66#include <vm/vm_param.h>
67#include <vm/vm_kern.h>
68#include <vm/vm_pageout.h>
69#include <vm/vm_page.h>
70#include <vm/vm_object.h>
71#include <vm/vm_extern.h>
72#include <vm/vm_map.h>
73#include "opt_compat.h"
74#include "opt_directio.h"
75#include "opt_swap.h"
76
77static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
78
79struct	bio_ops bioops;		/* I/O operation notification */
80
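/* Default buffer operations, used unless a filesystem installs its own. */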
81struct	buf_ops buf_ops_bio = {
82	.bop_name	=	"buf_ops_bio",
83	.bop_write	=	bufwrite,
84	.bop_strategy	=	bufstrategy,
85	.bop_sync	=	bufsync,
86	.bop_bdflush	=	bufbdflush,
87};
88
89/*
90 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
91 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
92 */
93struct buf *buf;		/* buffer header pool */
94
95static struct proc *bufdaemonproc;
96
97static int inmem(struct vnode *vp, daddr_t blkno);
98static void vm_hold_free_pages(struct buf *bp, int newbsize);
99static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
100		vm_offset_t to);
101static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
102static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
103		vm_page_t m);
104static void vfs_drain_busy_pages(struct buf *bp);
105static void vfs_clean_pages_dirty_buf(struct buf *bp);
106static void vfs_setdirty_locked_object(struct buf *bp);
107static void vfs_vmio_release(struct buf *bp);
108static int vfs_bio_clcheck(struct vnode *vp, int size,
109		daddr_t lblkno, daddr_t blkno);
110static int buf_do_flush(struct vnode *vp);
111static int flushbufqueues(struct vnode *, int, int);
112static void buf_daemon(void);
113static void bremfreel(struct buf *bp);
114#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
115    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
116static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
117#endif
118
119int vmiodirenable = TRUE;
120SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
121    "Use the VM system for directory writes");
122long runningbufspace;
123SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
124    "Amount of presently outstanding async buffer io");
125static long bufspace;
126#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
127    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
128SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
129    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
130#else
131SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
132    "Virtual memory used for buffers");
133#endif
134static long maxbufspace;
135SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
136    "Maximum allowed value of bufspace (including buf_daemon)");
137static long bufmallocspace;
138SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
139    "Amount of malloced memory for buffers");
140static long maxbufmallocspace;
141SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
142    "Maximum amount of malloced memory for buffers");
143static long lobufspace;
144SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
145    "Minimum amount of buffer space we want to have");
146long hibufspace;
147SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
148    "Maximum allowed value of bufspace (excluding buf_daemon)");
149static int bufreusecnt;
150SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
151    "Number of times we have reused a buffer");
152static int buffreekvacnt;
153SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
154    "Number of times we have freed the KVA space from some buffer");
155static int bufdefragcnt;
156SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
157    "Number of times we have had to repeat buffer allocation to defragment");
158static long lorunningspace;
159SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
160    "Minimum preferred space used for in-progress I/O");
161static long hirunningspace;
162SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
163    "Maximum amount of space to use for in-progress I/O");
164int dirtybufferflushes;
165SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
166    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
167int bdwriteskip;
168SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
169    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
170int altbufferflushes;
171SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
172    0, "Number of fsync flushes to limit dirty buffers");
173static int recursiveflushes;
174SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
175    0, "Number of flushes skipped due to being recursive");
176static int numdirtybuffers;
177SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
178    "Number of buffers that are dirty (have unwritten changes) at the moment");
179static int lodirtybuffers;
180SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
181    "How many buffers we want to have free before bufdaemon can sleep");
182static int hidirtybuffers;
183SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
184    "When the number of dirty buffers is considered severe");
185int dirtybufthresh;
186SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
187    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
188static int numfreebuffers;
189SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
190    "Number of free buffers");
191static int lofreebuffers;
192SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
193   "XXX Unused");
194static int hifreebuffers;
195SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
196   "XXX Complicatedly unused");
197static int getnewbufcalls;
198SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
199   "Number of calls to getnewbuf");
200static int getnewbufrestarts;
201SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
202    "Number of times getnewbuf has had to restart a buffer acquisition");
203static int flushbufqtarget = 100;
204SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
205    "Amount of work to do in flushbufqueues when helping bufdaemon");
206static long notbufdflashes;
207SYSCTL_LONG(_vfs, OID_AUTO, notbufdflashes, CTLFLAG_RD, &notbufdflashes, 0,
208    "Number of dirty buffer flushes done by the bufdaemon helpers");
209
210/*
211 * Wakeup point for bufdaemon, as well as indicator of whether it is already
212 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
213 * is idling.
214 */
215static int bd_request;
216
217/*
218 * Request for the buf daemon to write more buffers than is indicated by
219 * lodirtybuffers.  This may be necessary to push out excess dependencies or
220 * defragment the address space where a simple count of the number of dirty
221 * buffers is insufficient to characterize the demand for flushing them.
222 */
223static int bd_speedupreq;
224
225/*
226 * This lock synchronizes access to bd_request.
227 */
228static struct mtx bdlock;
229
230/*
231 * bogus page -- for I/O to/from partially complete buffers
232 * this is a temporary solution to the problem, but it is not
233 * really that bad.  It would be better to split the buffer
234 * for input in the case of buffers partially already in memory,
235 * but the code is intricate enough already.
236 */
237vm_page_t bogus_page;
238
239/*
240 * Synchronization (sleep/wakeup) variable for active buffer space requests.
241 * Set when wait starts, cleared prior to wakeup().
242 * Used in runningbufwakeup() and waitrunningbufspace().
243 */
244static int runningbufreq;
245
246/*
247 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
248 * waitrunningbufspace().
249 */
250static struct mtx rbreqlock;
251
252/*
253 * Synchronization (sleep/wakeup) variable for buffer requests.
254 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
255 * by and/or.
256 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
257 * getnewbuf(), and getblk().
258 */
259static int needsbuffer;
260
261/*
262 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
263 */
264static struct mtx nblock;
265
266/*
267 * Definitions for the buffer free lists.
268 */
269#define BUFFER_QUEUES	6	/* number of free buffer queues */
270
271#define QUEUE_NONE	0	/* on no queue */
272#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
273#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
274#define QUEUE_DIRTY_GIANT 3	/* B_DELWRI buffers that need giant */
275#define QUEUE_EMPTYKVA	4	/* empty buffer headers w/KVA assignment */
276#define QUEUE_EMPTY	5	/* empty buffer headers */
277#define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
278
279/* Queues for free buffers with various properties */
280static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
281
282/* Lock for the bufqueues */
283static struct mtx bqlock;
284
285/*
286 * Single global constant for BUF_WMESG, to avoid getting multiple references.
287 * buf_wmesg is referenced by macros.
288 */
289const char *buf_wmesg = BUF_WMESG;
290
291#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
292#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
293#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
294#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
295
296#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
297    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
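/*
 * Compatibility handler for the old int-sized bufspace sysctl: export the
 * long value as an int when it fits, and fall back to the long handler
 * (triggering ENOMEM) when it would overflow.
 */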
298static int
299sysctl_bufspace(SYSCTL_HANDLER_ARGS)
300{
301	long lvalue;
302	int ivalue;
303
304	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
305		return (sysctl_handle_long(oidp, arg1, arg2, req));
306	lvalue = *(long *)arg1;
307	if (lvalue > INT_MAX)
308		/* On overflow, still write out a long to trigger ENOMEM. */
309		return (sysctl_handle_long(oidp, &lvalue, 0, req));
310	ivalue = lvalue;
311	return (sysctl_handle_int(oidp, &ivalue, 0, req));
312}
313#endif
314
315#ifdef DIRECTIO
316extern void ffs_rawread_setup(void);
317#endif /* DIRECTIO */
318/*
319 *	numdirtywakeup:
320 *
321 *	If someone is blocked due to there being too many dirty buffers,
322 *	and numdirtybuffers is now reasonable, wake them up.
323 */
324
325static __inline void
326numdirtywakeup(int level)
327{
328
329	if (numdirtybuffers <= level) {
330		mtx_lock(&nblock);
331		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
332			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
333			wakeup(&needsbuffer);
334		}
335		mtx_unlock(&nblock);
336	}
337}
338
339/*
340 *	bufspacewakeup:
341 *
342 *	Called when buffer space is potentially available for recovery.
343 *	getnewbuf() will block on this flag when it is unable to free
344 *	sufficient buffer space.  Buffer space becomes recoverable when
345 *	bp's get placed back in the queues.
346 */
347
348static __inline void
349bufspacewakeup(void)
350{
351
352	/*
353	 * If someone is waiting for BUF space, wake them up.  Even
354	 * though we haven't freed the kva space yet, the waiting
355	 * process will be able to now.
356	 */
357	mtx_lock(&nblock);
358	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
359		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
360		wakeup(&needsbuffer);
361	}
362	mtx_unlock(&nblock);
363}
364
365/*
366 * runningbufwakeup() - in-progress I/O accounting.
367 *
368 */
369void
370runningbufwakeup(struct buf *bp)
371{
372
373	if (bp->b_runningbufspace) {
374		atomic_subtract_long(&runningbufspace, bp->b_runningbufspace);
375		bp->b_runningbufspace = 0;
376		mtx_lock(&rbreqlock);
377		if (runningbufreq && runningbufspace <= lorunningspace) {
378			runningbufreq = 0;
379			wakeup(&runningbufreq);
380		}
381		mtx_unlock(&rbreqlock);
382	}
383}
384
385/*
386 *	bufcountwakeup:
387 *
388 *	Called when a buffer has been added to one of the free queues to
389 *	account for the buffer and to wakeup anyone waiting for free buffers.
390 *	This typically occurs when large amounts of metadata are being handled
391 *	by the buffer cache ( else buffer space runs out first, usually ).
392 */
393
394static __inline void
395bufcountwakeup(struct buf *bp)
396{
397	int old;
398
399	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
400	    ("buf %p already counted as free", bp));
401	if (bp->b_bufobj != NULL)
402		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
403	bp->b_vflags |= BV_INFREECNT;
404	old = atomic_fetchadd_int(&numfreebuffers, 1);
405	KASSERT(old >= 0 && old < nbuf,
406	    ("numfreebuffers climbed to %d", old + 1));
407	mtx_lock(&nblock);
408	if (needsbuffer) {
409		needsbuffer &= ~VFS_BIO_NEED_ANY;
410		if (numfreebuffers >= hifreebuffers)
411			needsbuffer &= ~VFS_BIO_NEED_FREE;
412		wakeup(&needsbuffer);
413	}
414	mtx_unlock(&nblock);
415}
416
417/*
418 *	waitrunningbufspace()
419 *
420 *	runningbufspace is a measure of the amount of I/O currently
421 *	running.  This routine is used in async-write situations to
422 *	prevent creating huge backups of pending writes to a device.
423 *	Only asynchronous writes are governed by this function.
424 *
425 *	Reads will adjust runningbufspace, but will not block based on it.
426 *	The read load has a side effect of reducing the allowed write load.
427 *
428 *	This does NOT turn an async write into a sync write.  It waits
429 *	for earlier writes to complete and generally returns before the
430 *	caller's write has reached the device.
431 */
432void
433waitrunningbufspace(void)
434{
435
436	mtx_lock(&rbreqlock);
437	while (runningbufspace > hirunningspace) {
438		++runningbufreq;
439		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
440	}
441	mtx_unlock(&rbreqlock);
442}
443
444
445/*
446 *	vfs_buf_test_cache:
447 *
448 *	Called when a buffer is extended.  This function clears the B_CACHE
449 *	bit if the newly extended portion of the buffer does not contain
450 *	valid data.
451 */
452static __inline
453void
454vfs_buf_test_cache(struct buf *bp,
455		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
456		  vm_page_t m)
457{
458
459	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
460	if (bp->b_flags & B_CACHE) {
461		int base = (foff + off) & PAGE_MASK;
462		if (vm_page_is_valid(m, base, size) == 0)
463			bp->b_flags &= ~B_CACHE;
464	}
465}
466
467/* Wake up the buffer daemon if necessary */
468static __inline
469void
470bd_wakeup(int dirtybuflevel)
471{
472
473	mtx_lock(&bdlock);
474	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
475		bd_request = 1;
476		wakeup(&bd_request);
477	}
478	mtx_unlock(&bdlock);
479}
480
481/*
482 * bd_speedup - speedup the buffer cache flushing code
483 */
484
485void
486bd_speedup(void)
487{
488	int needwake;
489
490	mtx_lock(&bdlock);
491	needwake = 0;
492	if (bd_speedupreq == 0 || bd_request == 0)
493		needwake = 1;
494	bd_speedupreq = 1;
495	bd_request = 1;
496	if (needwake)
497		wakeup(&bd_request);
498	mtx_unlock(&bdlock);
499}
500
501/*
502 * Calculate buffer cache scaling values and reserve space for buffer
503 * headers.  This is called during low-level kernel initialization and
504 * may be called more than once.  We CANNOT write to the memory area
505 * being reserved at this time.
506 */
507caddr_t
508kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
509{
510	int tuned_nbuf;
511	long maxbuf;
512
513	/*
514	 * physmem_est is in pages.  Convert it to kilobytes (assumes
515	 * PAGE_SIZE is >= 1K)
516	 */
517	physmem_est = physmem_est * (PAGE_SIZE / 1024);
518
519	/*
520	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
521	 * For the first 64MB of ram nominally allocate sufficient buffers to
522	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
523	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
524	 * the buffer cache we limit the eventual kva reservation to
525	 * maxbcache bytes.
526	 *
527	 * factor represents the 1/4 x ram conversion.
528	 */
529	if (nbuf == 0) {
530		int factor = 4 * BKVASIZE / 1024;
531
532		nbuf = 50;
533		if (physmem_est > 4096)
534			nbuf += min((physmem_est - 4096) / factor,
535			    65536 / factor);
536		if (physmem_est > 65536)
537			nbuf += (physmem_est - 65536) * 2 / (factor * 5);
538
539		if (maxbcache && nbuf > maxbcache / BKVASIZE)
540			nbuf = maxbcache / BKVASIZE;
541		tuned_nbuf = 1;
542	} else
543		tuned_nbuf = 0;
544
545	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
546	maxbuf = (LONG_MAX / 3) / BKVASIZE;
547	if (nbuf > maxbuf) {
548		if (!tuned_nbuf)
549			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
550			    maxbuf);
551		nbuf = maxbuf;
552	}
553
554	/*
555	 * swbufs are used as temporary holders for I/O, such as paging I/O.
556	 * We have no fewer than 16 and no more than 256.
557	 */
558	nswbuf = max(min(nbuf/4, 256), 16);
559#ifdef NSWBUF_MIN
560	if (nswbuf < NSWBUF_MIN)
561		nswbuf = NSWBUF_MIN;
562#endif
563#ifdef DIRECTIO
564	ffs_rawread_setup();
565#endif
566
567	/*
568	 * Reserve space for the buffer cache buffers
569	 */
570	swbuf = (void *)v;
571	v = (caddr_t)(swbuf + nswbuf);
572	buf = (void *)v;
573	v = (caddr_t)(buf + nbuf);
574
575	return(v);
576}
577
578/* Initialize the buffer subsystem.  Called before use of any buffers. */
579void
580bufinit(void)
581{
582	struct buf *bp;
583	int i;
584
585	mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
586	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
587	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
588	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
589
590	/* next, make a null set of free lists */
591	for (i = 0; i < BUFFER_QUEUES; i++)
592		TAILQ_INIT(&bufqueues[i]);
593
594	/* finally, initialize each buffer header and stick on empty q */
595	for (i = 0; i < nbuf; i++) {
596		bp = &buf[i];
597		bzero(bp, sizeof *bp);
598		bp->b_flags = B_INVAL;	/* we're just an empty header */
599		bp->b_rcred = NOCRED;
600		bp->b_wcred = NOCRED;
601		bp->b_qindex = QUEUE_EMPTY;
602		bp->b_vflags = BV_INFREECNT;	/* buf is counted as free */
603		bp->b_xflags = 0;
604		LIST_INIT(&bp->b_dep);
605		BUF_LOCKINIT(bp);
606		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
607	}
608
609	/*
610	 * maxbufspace is the absolute maximum amount of buffer space we are
611	 * allowed to reserve in KVM and in real terms.  The absolute maximum
612	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
613	 * used by most other processes.  The differential is required to
614	 * ensure that buf_daemon is able to run when other processes might
615	 * be blocked waiting for buffer space.
616	 *
617	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
618	 * this may result in KVM fragmentation which is not handled optimally
619	 * by the system.
620	 */
621	maxbufspace = (long)nbuf * BKVASIZE;
622	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
623	lobufspace = hibufspace - MAXBSIZE;
624
625	/*
626	 * Note: The 16 MiB upper limit for hirunningspace was chosen
627	 * arbitrarily and may need further tuning. It corresponds to
628	 * 128 outstanding write IO requests (if IO size is 128 KiB),
629	 * which fits with many RAID controllers' tagged queuing limits.
630	 * The lower 1 MiB limit is the historical upper limit for
631	 * hirunningspace.
632	 */
633	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE),
634	    16 * 1024 * 1024), 1024 * 1024);
635	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE);
636
637/*
638 * Limit the amount of malloc memory since it is wired permanently into
639 * the kernel space.  Even though this is accounted for in the buffer
640 * allocation, we don't want the malloced region to grow uncontrolled.
641 * The malloc scheme improves memory utilization significantly for average
642 * (small) directories.
643 */
644	maxbufmallocspace = hibufspace / 20;
645
646/*
647 * Reduce the chance of a deadlock occurring by limiting the number
648 * of delayed-write dirty buffers we allow to stack up.
649 */
650	hidirtybuffers = nbuf / 4 + 20;
651	dirtybufthresh = hidirtybuffers * 9 / 10;
652	numdirtybuffers = 0;
653/*
654 * To support extreme low-memory systems, make sure hidirtybuffers cannot
655 * eat up all available buffer space.  This occurs when our minimum cannot
656 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
657 * BKVASIZE'd buffers.
658 */
659	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
660		hidirtybuffers >>= 1;
661	}
662	lodirtybuffers = hidirtybuffers / 2;
663
664/*
665 * Try to keep the number of free buffers in the specified range,
666 * and give special processes (e.g., buf_daemon) access to an
667 * emergency reserve.
668 */
669	lofreebuffers = nbuf / 18 + 5;
670	hifreebuffers = 2 * lofreebuffers;
671	numfreebuffers = nbuf;
672
673	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
674	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
675}
676
677/*
678 * bfreekva() - free the kva allocation for a buffer.
679 *
680 *	Since this call frees up buffer space, we call bufspacewakeup().
681 */
682static void
683bfreekva(struct buf *bp)
684{
685
686	if (bp->b_kvasize) {
687		atomic_add_int(&buffreekvacnt, 1);
688		atomic_subtract_long(&bufspace, bp->b_kvasize);
689		vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
690		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
691		bp->b_kvasize = 0;
692		bufspacewakeup();
693	}
694}
695
696/*
697 *	bremfree:
698 *
699 *	Mark the buffer for removal from the appropriate free list in brelse.
700 *
701 */
702void
703bremfree(struct buf *bp)
704{
705	int old;
706
707	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
708	KASSERT((bp->b_flags & B_REMFREE) == 0,
709	    ("bremfree: buffer %p already marked for delayed removal.", bp));
710	KASSERT(bp->b_qindex != QUEUE_NONE,
711	    ("bremfree: buffer %p not on a queue.", bp));
712	BUF_ASSERT_HELD(bp);
713
714	bp->b_flags |= B_REMFREE;
715	/* Fixup numfreebuffers count.  */
716	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
717		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
718		    ("buf %p not counted in numfreebuffers", bp));
719		if (bp->b_bufobj != NULL)
720			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
721		bp->b_vflags &= ~BV_INFREECNT;
722		old = atomic_fetchadd_int(&numfreebuffers, -1);
723		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
724	}
725}
726
727/*
728 *	bremfreef:
729 *
730 *	Force an immediate removal from a free list.  Used only in nfs when
731 *	it abuses the b_freelist pointer.
732 */
733void
734bremfreef(struct buf *bp)
735{
736	mtx_lock(&bqlock);
737	bremfreel(bp);
738	mtx_unlock(&bqlock);
739}
740
741/*
742 *	bremfreel:
743 *
744 *	Removes a buffer from the free list, must be called with the
745 *	bqlock held.
746 */
747static void
748bremfreel(struct buf *bp)
749{
750	int old;
751
752	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
753	    bp, bp->b_vp, bp->b_flags);
754	KASSERT(bp->b_qindex != QUEUE_NONE,
755	    ("bremfreel: buffer %p not on a queue.", bp));
756	BUF_ASSERT_HELD(bp);
757	mtx_assert(&bqlock, MA_OWNED);
758
759	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
760	bp->b_qindex = QUEUE_NONE;
761	/*
762	 * If this was a delayed bremfree() we only need to remove the buffer
763	 * from the queue and return the stats are already done.
764	 * from the queue and return; the stats are already done.
765	if (bp->b_flags & B_REMFREE) {
766		bp->b_flags &= ~B_REMFREE;
767		return;
768	}
769	/*
770	 * Fixup numfreebuffers count.  If the buffer is invalid or not
771	 * delayed-write, the buffer was free and we must decrement
772	 * numfreebuffers.
773	 */
774	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
775		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
776		    ("buf %p not counted in numfreebuffers", bp));
777		if (bp->b_bufobj != NULL)
778			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
779		bp->b_vflags &= ~BV_INFREECNT;
780		old = atomic_fetchadd_int(&numfreebuffers, -1);
781		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
782	}
783}
784
785/*
786 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
787 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
788 * the buffer is valid and we do not have to do anything.
789 */
790void
791breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
792    int cnt, struct ucred * cred)
793{
794	struct buf *rabp;
795	int i;
796
797	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
798		if (inmem(vp, *rablkno))
799			continue;
800		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
801
802		if ((rabp->b_flags & B_CACHE) == 0) {
803			if (!TD_IS_IDLETHREAD(curthread))
804				curthread->td_ru.ru_inblock++;
805			rabp->b_flags |= B_ASYNC;
806			rabp->b_flags &= ~B_INVAL;
807			rabp->b_ioflags &= ~BIO_ERROR;
808			rabp->b_iocmd = BIO_READ;
809			if (rabp->b_rcred == NOCRED && cred != NOCRED)
810				rabp->b_rcred = crhold(cred);
811			vfs_busy_pages(rabp, 0);
812			BUF_KERNPROC(rabp);
813			rabp->b_iooffset = dbtob(rabp->b_blkno);
814			bstrategy(rabp);
815		} else {
816			brelse(rabp);
817		}
818	}
819}
820
821/*
822 * Entry point for bread() and breadn() via #defines in sys/buf.h.
823 *
824 * Get a buffer with the specified data.  Look in the cache first.  We
825 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
826 * is set, the buffer is valid and we do not have to do anything, see
827 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
828 */
829int
830breadn_flags(struct vnode * vp, daddr_t blkno, int size,
831    daddr_t * rablkno, int *rabsize, int cnt,
832    struct ucred * cred, int flags, struct buf **bpp)
833{
834	struct buf *bp;
835	int rv = 0, readwait = 0;
836
837	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
838	/*
839	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
840	 */
841	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
842	if (bp == NULL)
843		return (EBUSY);
844
845	/* if not found in cache, do some I/O */
846	if ((bp->b_flags & B_CACHE) == 0) {
847		if (!TD_IS_IDLETHREAD(curthread))
848			curthread->td_ru.ru_inblock++;
849		bp->b_iocmd = BIO_READ;
850		bp->b_flags &= ~B_INVAL;
851		bp->b_ioflags &= ~BIO_ERROR;
852		if (bp->b_rcred == NOCRED && cred != NOCRED)
853			bp->b_rcred = crhold(cred);
854		vfs_busy_pages(bp, 0);
855		bp->b_iooffset = dbtob(bp->b_blkno);
856		bstrategy(bp);
857		++readwait;
858	}
859
860	breada(vp, rablkno, rabsize, cnt, cred);
861
862	if (readwait) {
863		rv = bufwait(bp);
864	}
865	return (rv);
866}
867
868/*
869 * Write, release buffer on completion.  (Done by iodone
870 * if async).  Do not bother writing anything if the buffer
871 * is invalid.
872 *
873 * Note that we set B_CACHE here, indicating that the buffer is
874 * fully valid and thus cacheable.  This is true even of NFS
875 * now so we set it generally.  This could be set either here
876 * or in biodone() since the I/O is synchronous.  We put it
877 * here.
878 */
879int
880bufwrite(struct buf *bp)
881{
882	int oldflags;
883	struct vnode *vp;
884	int vp_md;
885
886	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
887	if (bp->b_flags & B_INVAL) {
888		brelse(bp);
889		return (0);
890	}
891
892	oldflags = bp->b_flags;
893
894	BUF_ASSERT_HELD(bp);
895
896	if (bp->b_pin_count > 0)
897		bunpin_wait(bp);
898
899	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
900	    ("FFS background buffer should not get here %p", bp));
901
902	vp = bp->b_vp;
903	if (vp)
904		vp_md = vp->v_vflag & VV_MD;
905	else
906		vp_md = 0;
907
908	/* Mark the buffer clean */
909	bundirty(bp);
910
911	bp->b_flags &= ~B_DONE;
912	bp->b_ioflags &= ~BIO_ERROR;
913	bp->b_flags |= B_CACHE;
914	bp->b_iocmd = BIO_WRITE;
915
916	bufobj_wref(bp->b_bufobj);
917	vfs_busy_pages(bp, 1);
918
919	/*
920	 * Normal bwrites pipeline writes
921	 */
922	bp->b_runningbufspace = bp->b_bufsize;
923	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
924
925	if (!TD_IS_IDLETHREAD(curthread))
926		curthread->td_ru.ru_oublock++;
927	if (oldflags & B_ASYNC)
928		BUF_KERNPROC(bp);
929	bp->b_iooffset = dbtob(bp->b_blkno);
930	bstrategy(bp);
931
932	if ((oldflags & B_ASYNC) == 0) {
933		int rtval = bufwait(bp);
934		brelse(bp);
935		return (rtval);
936	} else {
937		/*
938		 * don't allow the async write to saturate the I/O
939		 * system.  We will not deadlock here because
940		 * we are blocking waiting for I/O that is already in-progress
941		 * to complete. We do not block here if it is the update
942		 * or syncer daemon trying to clean up as that can lead
943		 * to deadlock.
944		 */
945		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
946			waitrunningbufspace();
947	}
948
949	return (0);
950}
951
952void
953bufbdflush(struct bufobj *bo, struct buf *bp)
954{
955	struct buf *nbp;
956
957	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
958		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
959		altbufferflushes++;
960	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
961		BO_LOCK(bo);
962		/*
963		 * Try to find a buffer to flush.
964		 */
965		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
966			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
967			    BUF_LOCK(nbp,
968				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
969				continue;
970			if (bp == nbp)
971				panic("bdwrite: found ourselves");
972			BO_UNLOCK(bo);
973			/* Don't call buf_countdeps() with the bo lock held. */
974			if (buf_countdeps(nbp, 0)) {
975				BO_LOCK(bo);
976				BUF_UNLOCK(nbp);
977				continue;
978			}
979			if (nbp->b_flags & B_CLUSTEROK) {
980				vfs_bio_awrite(nbp);
981			} else {
982				bremfree(nbp);
983				bawrite(nbp);
984			}
985			dirtybufferflushes++;
986			break;
987		}
988		if (nbp == NULL)
989			BO_UNLOCK(bo);
990	}
991}
992
993/*
994 * Delayed write. (Buffer is marked dirty).  Do not bother writing
995 * anything if the buffer is marked invalid.
996 *
997 * Note that since the buffer must be completely valid, we can safely
998 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
999 * biodone() in order to prevent getblk from writing the buffer
1000 * out synchronously.
1001 */
1002void
1003bdwrite(struct buf *bp)
1004{
1005	struct thread *td = curthread;
1006	struct vnode *vp;
1007	struct bufobj *bo;
1008
1009	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1010	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1011	BUF_ASSERT_HELD(bp);
1012
1013	if (bp->b_flags & B_INVAL) {
1014		brelse(bp);
1015		return;
1016	}
1017
1018	/*
1019	 * If we have too many dirty buffers, don't create any more.
1020	 * If we are wildly over our limit, then force a complete
1021	 * cleanup. Otherwise, just keep the situation from getting
1022	 * out of control. Note that we have to avoid a recursive
1023	 * disaster and not try to clean up after our own cleanup!
1024	 */
1025	vp = bp->b_vp;
1026	bo = bp->b_bufobj;
1027	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
1028		td->td_pflags |= TDP_INBDFLUSH;
1029		BO_BDFLUSH(bo, bp);
1030		td->td_pflags &= ~TDP_INBDFLUSH;
1031	} else
1032		recursiveflushes++;
1033
1034	bdirty(bp);
1035	/*
1036	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
1037	 * true even of NFS now.
1038	 */
1039	bp->b_flags |= B_CACHE;
1040
1041	/*
1042	 * This bmap keeps the system from needing to do the bmap later,
1043	 * perhaps when the system is attempting to do a sync.  Since it
1044	 * is likely that the indirect block -- or whatever other data structure
1045	 * that the filesystem needs is still in memory now, it is a good
1046	 * thing to do this.  Note also that if the pageout daemon is
1047	 * requesting a sync -- there might not be enough memory to do
1048	 * the bmap then...  So, this is important to do.
1049	 */
1050	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
1051		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1052	}
1053
1054	/*
1055	 * Set the *dirty* buffer range based upon the VM system dirty
1056	 * pages.
1057	 *
1058	 * Mark the buffer pages as clean.  We need to do this here to
1059	 * satisfy the vnode_pager and the pageout daemon, so that it
1060	 * thinks that the pages have been "cleaned".  Note that since
1061	 * the pages are in a delayed write buffer -- the VFS layer
1062	 * "will" see that the pages get written out on the next sync,
1063	 * or perhaps the cluster will be completed.
1064	 */
1065	vfs_clean_pages_dirty_buf(bp);
1066	bqrelse(bp);
1067
1068	/*
1069	 * Wakeup the buffer flushing daemon if we have a lot of dirty
1070	 * buffers (midpoint between our recovery point and our stall
1071	 * point).
1072	 */
1073	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1074
1075	/*
1076	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
1077	 * due to the softdep code.
1078	 */
1079}
1080
1081/*
1082 *	bdirty:
1083 *
1084 *	Turn buffer into delayed write request.  We must clear BIO_READ and
1085 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
1086 *	itself to properly update it in the dirty/clean lists.  We mark it
1087 *	B_DONE to ensure that any asynchronization of the buffer properly
1088 *	clears B_DONE ( else a panic will occur later ).
1089 *
1090 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
1091 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
1092 *	should only be called if the buffer is known-good.
1093 *
1094 *	Since the buffer is not on a queue, we do not update the numfreebuffers
1095 *	count.
1096 *
1097 *	The buffer must be on QUEUE_NONE.
1098 */
1099void
1100bdirty(struct buf *bp)
1101{
1102
1103	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
1104	    bp, bp->b_vp, bp->b_flags);
1105	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1106	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1107	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
1108	BUF_ASSERT_HELD(bp);
1109	bp->b_flags &= ~(B_RELBUF);
1110	bp->b_iocmd = BIO_WRITE;
1111
1112	if ((bp->b_flags & B_DELWRI) == 0) {
1113		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
1114		reassignbuf(bp);
1115		atomic_add_int(&numdirtybuffers, 1);
1116		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1117	}
1118}
1119
1120/*
1121 *	bundirty:
1122 *
1123 *	Clear B_DELWRI for buffer.
1124 *
1125 *	Since the buffer is not on a queue, we do not update the numfreebuffers
1126 *	count.
1127 *
1128 *	The buffer must be on QUEUE_NONE.
1129 */
1130
1131void
1132bundirty(struct buf *bp)
1133{
1134
1135	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1136	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1137	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1138	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
1139	BUF_ASSERT_HELD(bp);
1140
1141	if (bp->b_flags & B_DELWRI) {
1142		bp->b_flags &= ~B_DELWRI;
1143		reassignbuf(bp);
1144		atomic_subtract_int(&numdirtybuffers, 1);
1145		numdirtywakeup(lodirtybuffers);
1146	}
1147	/*
1148	 * Since it is now being written, we can clear its deferred write flag.
1149	 */
1150	bp->b_flags &= ~B_DEFERRED;
1151}
1152
1153/*
1154 *	bawrite:
1155 *
1156 *	Asynchronous write.  Start output on a buffer, but do not wait for
1157 *	it to complete.  The buffer is released when the output completes.
1158 *
1159 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
1160 *	B_INVAL buffers.  Not us.
1161 */
1162void
1163bawrite(struct buf *bp)
1164{
1165
1166	bp->b_flags |= B_ASYNC;
1167	(void) bwrite(bp);
1168}
1169
1170/*
1171 *	bwillwrite:
1172 *
1173 *	Called prior to the locking of any vnodes when we are expecting to
1174 *	write.  We do not want to starve the buffer cache with too many
1175 *	dirty buffers so we block here.  By blocking prior to the locking
1176 *	of any vnodes we attempt to avoid the situation where a locked vnode
1177 *	prevents the various system daemons from flushing related buffers.
1178 */
1179
1180void
1181bwillwrite(void)
1182{
1183
1184	if (numdirtybuffers >= hidirtybuffers) {
1185		mtx_lock(&nblock);
1186		while (numdirtybuffers >= hidirtybuffers) {
1187			bd_wakeup(1);
1188			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
1189			msleep(&needsbuffer, &nblock,
1190			    (PRIBIO + 4), "flswai", 0);
1191		}
1192		mtx_unlock(&nblock);
1193	}
1194}
1195
1196/*
1197 * Return true if we have too many dirty buffers.
1198 */
1199int
1200buf_dirty_count_severe(void)
1201{
1202
1203	return(numdirtybuffers >= hidirtybuffers);
1204}
1205
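/*
 * Wrapper around vm_page_count_severe() with a fail point, so that severe
 * page shortage can be simulated when exercising the low-memory paths.
 */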
1206static __noinline int
1207buf_vm_page_count_severe(void)
1208{
1209
1210	KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);
1211
1212	return vm_page_count_severe();
1213}
1214
1215/*
1216 *	brelse:
1217 *
1218 *	Release a busy buffer and, if requested, free its resources.  The
1219 *	buffer will be stashed in the appropriate bufqueues[] allowing it
1220 *	to be accessed later as a cache entity or reused for other purposes.
1221 */
1222void
1223brelse(struct buf *bp)
1224{
1225	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
1226	    bp, bp->b_vp, bp->b_flags);
1227	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1228	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1229
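	/* Filesystem-managed buffers bypass the free queues; see bqrelse(). */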
1230	if (bp->b_flags & B_MANAGED) {
1231		bqrelse(bp);
1232		return;
1233	}
1234
1235	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
1236	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
1237		/*
1238		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
1239		 * pages from being scrapped.  If the error is anything
1240		 * other than an I/O error (EIO), assume that retrying
1241		 * is futile.
1242		 */
1243		bp->b_ioflags &= ~BIO_ERROR;
1244		bdirty(bp);
1245	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
1246	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
1247		/*
1248		 * Either a failed I/O or we were asked to free or not
1249		 * cache the buffer.
1250		 */
1251		bp->b_flags |= B_INVAL;
1252		if (!LIST_EMPTY(&bp->b_dep))
1253			buf_deallocate(bp);
1254		if (bp->b_flags & B_DELWRI) {
1255			atomic_subtract_int(&numdirtybuffers, 1);
1256			numdirtywakeup(lodirtybuffers);
1257		}
1258		bp->b_flags &= ~(B_DELWRI | B_CACHE);
1259		if ((bp->b_flags & B_VMIO) == 0) {
1260			if (bp->b_bufsize)
1261				allocbuf(bp, 0);
1262			if (bp->b_vp)
1263				brelvp(bp);
1264		}
1265	}
1266
1267	/*
1268	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
1269	 * is called with B_DELWRI set, the underlying pages may wind up
1270	 * getting freed causing a previous write (bdwrite()) to get 'lost'
1271	 * because pages associated with a B_DELWRI bp are marked clean.
1272	 *
1273	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
1274	 * if B_DELWRI is set.
1275	 *
1276	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
1277	 * on pages to return pages to the VM page queues.
1278	 */
1279	if (bp->b_flags & B_DELWRI)
1280		bp->b_flags &= ~B_RELBUF;
1281	else if (buf_vm_page_count_severe()) {
1282		/*
1283		 * The locking of the BO_LOCK is not necessary since
1284		 * BKGRDINPROG cannot be set while we hold the buf
1285		 * lock, it can only be cleared if it is already
1286		 * pending.
1287		 */
1288		if (bp->b_vp) {
1289			if (!(bp->b_vflags & BV_BKGRDINPROG))
1290				bp->b_flags |= B_RELBUF;
1291		} else
1292			bp->b_flags |= B_RELBUF;
1293	}
1294
1295	/*
1296	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
1297	 * constituted, not even NFS buffers now.  Two flags affect this.  If
1298	 * B_INVAL, the struct buf is invalidated but the VM object is kept
1299	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
1300	 *
1301	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
1302	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
1303	 * buffer is also B_INVAL because it hits the re-dirtying code above.
1304	 *
1305	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
1306	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
1307	 * the commit state and we cannot afford to lose the buffer. If the
1308	 * buffer has a background write in progress, we need to keep it
1309	 * around to prevent it from being reconstituted and starting a second
1310	 * background write.
1311	 */
1312	if ((bp->b_flags & B_VMIO)
1313	    && !(bp->b_vp->v_mount != NULL &&
1314		 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
1315		 !vn_isdisk(bp->b_vp, NULL) &&
1316		 (bp->b_flags & B_DELWRI))
1317	    ) {
1318
1319		int i, j, resid;
1320		vm_page_t m;
1321		off_t foff;
1322		vm_pindex_t poff;
1323		vm_object_t obj;
1324
1325		obj = bp->b_bufobj->bo_object;
1326
1327		/*
1328		 * Get the base offset and length of the buffer.  Note that
1329		 * in the VMIO case if the buffer block size is not
1330		 * page-aligned then b_data pointer may not be page-aligned.
1331		 * page-aligned, then the b_data pointer may not be page-aligned.
1332		 *
1333		 * block sizes less then DEV_BSIZE (usually 512) are not
1334		 * Block sizes less than DEV_BSIZE (usually 512) are not
1335		 * m->dirty, etc...).
1336		 *
1337		 * See man buf(9) for more information
1338		 */
1339		resid = bp->b_bufsize;
1340		foff = bp->b_offset;
1341		VM_OBJECT_LOCK(obj);
1342		for (i = 0; i < bp->b_npages; i++) {
1343			int had_bogus = 0;
1344
1345			m = bp->b_pages[i];
1346
1347			/*
1348			 * If we hit a bogus page, fixup *all* the bogus pages
1349			 * now.
1350			 */
1351			if (m == bogus_page) {
1352				poff = OFF_TO_IDX(bp->b_offset);
1353				had_bogus = 1;
1354
1355				for (j = i; j < bp->b_npages; j++) {
1356					vm_page_t mtmp;
1357					mtmp = bp->b_pages[j];
1358					if (mtmp == bogus_page) {
1359						mtmp = vm_page_lookup(obj, poff + j);
1360						if (!mtmp) {
1361							panic("brelse: page missing\n");
1362						}
1363						bp->b_pages[j] = mtmp;
1364					}
1365				}
1366
1367				if ((bp->b_flags & B_INVAL) == 0) {
1368					pmap_qenter(
1369					    trunc_page((vm_offset_t)bp->b_data),
1370					    bp->b_pages, bp->b_npages);
1371				}
1372				m = bp->b_pages[i];
1373			}
1374			if ((bp->b_flags & B_NOCACHE) ||
1375			    (bp->b_ioflags & BIO_ERROR &&
1376			     bp->b_iocmd == BIO_READ)) {
1377				int poffset = foff & PAGE_MASK;
1378				int presid = resid > (PAGE_SIZE - poffset) ?
1379					(PAGE_SIZE - poffset) : resid;
1380
1381				KASSERT(presid >= 0, ("brelse: extra page"));
1382				vm_page_set_invalid(m, poffset, presid);
1383				if (had_bogus)
1384					printf("avoided corruption bug in bogus_page/brelse code\n");
1385			}
1386			resid -= PAGE_SIZE - (foff & PAGE_MASK);
1387			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1388		}
1389		VM_OBJECT_UNLOCK(obj);
1390		if (bp->b_flags & (B_INVAL | B_RELBUF))
1391			vfs_vmio_release(bp);
1392
1393	} else if (bp->b_flags & B_VMIO) {
1394
1395		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1396			vfs_vmio_release(bp);
1397		}
1398
1399	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
1400		if (bp->b_bufsize != 0)
1401			allocbuf(bp, 0);
1402		if (bp->b_vp != NULL)
1403			brelvp(bp);
1404	}
1405
1406	if (BUF_LOCKRECURSED(bp)) {
1407		/* do not release to free list */
1408		BUF_UNLOCK(bp);
1409		return;
1410	}
1411
1412	/* enqueue */
1413	mtx_lock(&bqlock);
1414	/* Handle delayed bremfree() processing. */
1415	if (bp->b_flags & B_REMFREE) {
1416		struct bufobj *bo;
1417
1418		bo = bp->b_bufobj;
1419		if (bo != NULL)
1420			BO_LOCK(bo);
1421		bremfreel(bp);
1422		if (bo != NULL)
1423			BO_UNLOCK(bo);
1424	}
1425	if (bp->b_qindex != QUEUE_NONE)
1426		panic("brelse: free buffer onto another queue???");
1427
1428	/*
1429	 * If the buffer has junk contents signal it and eventually
1430	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
1431	 * doesn't find it.
1432	 */
1433	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
1434	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
1435		bp->b_flags |= B_INVAL;
1436	if (bp->b_flags & B_INVAL) {
1437		if (bp->b_flags & B_DELWRI)
1438			bundirty(bp);
1439		if (bp->b_vp)
1440			brelvp(bp);
1441	}
1442
1443	/* buffers with no memory */
1444	if (bp->b_bufsize == 0) {
1445		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1446		if (bp->b_vflags & BV_BKGRDINPROG)
1447			panic("losing buffer 1");
1448		if (bp->b_kvasize) {
1449			bp->b_qindex = QUEUE_EMPTYKVA;
1450		} else {
1451			bp->b_qindex = QUEUE_EMPTY;
1452		}
1453		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1454	/* buffers with junk contents */
1455	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
1456	    (bp->b_ioflags & BIO_ERROR)) {
1457		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1458		if (bp->b_vflags & BV_BKGRDINPROG)
1459			panic("losing buffer 2");
1460		bp->b_qindex = QUEUE_CLEAN;
1461		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1462	/* remaining buffers */
1463	} else {
1464		if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
1465		    (B_DELWRI|B_NEEDSGIANT))
1466			bp->b_qindex = QUEUE_DIRTY_GIANT;
1467		else if (bp->b_flags & B_DELWRI)
1468			bp->b_qindex = QUEUE_DIRTY;
1469		else
1470			bp->b_qindex = QUEUE_CLEAN;
1471		if (bp->b_flags & B_AGE)
1472			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1473		else
1474			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1475	}
1476	mtx_unlock(&bqlock);
1477
1478	/*
1479	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
1480	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
1481	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
1482	 * if B_INVAL is set ).
1483	 */
1484
1485	if (!(bp->b_flags & B_DELWRI)) {
1486		struct bufobj *bo;
1487
1488		bo = bp->b_bufobj;
1489		if (bo != NULL)
1490			BO_LOCK(bo);
1491		bufcountwakeup(bp);
1492		if (bo != NULL)
1493			BO_UNLOCK(bo);
1494	}
1495
1496	/*
1497	 * Something we can maybe free or reuse
1498	 */
1499	if (bp->b_bufsize || bp->b_kvasize)
1500		bufspacewakeup();
1501
1502	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
1503	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1504		panic("brelse: not dirty");
1505	/* unlock */
1506	BUF_UNLOCK(bp);
1507}
1508
1509/*
1510 * Release a buffer back to the appropriate queue but do not try to free
1511 * it.  The buffer is expected to be used again soon.
1512 *
1513 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1514 * biodone() to requeue an async I/O on completion.  It is also used when
1515 * known good buffers need to be requeued but we think we may need the data
1516 * again soon.
1517 *
1518 * XXX we should be able to leave the B_RELBUF hint set on completion.
1519 */
1520void
1521bqrelse(struct buf *bp)
1522{
1523	struct bufobj *bo;
1524
1525	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1526	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1527	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1528
1529	if (BUF_LOCKRECURSED(bp)) {
1530		/* do not release to free list */
1531		BUF_UNLOCK(bp);
1532		return;
1533	}
1534
1535	bo = bp->b_bufobj;
1536	if (bp->b_flags & B_MANAGED) {
1537		if (bp->b_flags & B_REMFREE) {
1538			mtx_lock(&bqlock);
1539			if (bo != NULL)
1540				BO_LOCK(bo);
1541			bremfreel(bp);
1542			if (bo != NULL)
1543				BO_UNLOCK(bo);
1544			mtx_unlock(&bqlock);
1545		}
1546		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1547		BUF_UNLOCK(bp);
1548		return;
1549	}
1550
1551	mtx_lock(&bqlock);
1552	/* Handle delayed bremfree() processing. */
1553	if (bp->b_flags & B_REMFREE) {
1554		if (bo != NULL)
1555			BO_LOCK(bo);
1556		bremfreel(bp);
1557		if (bo != NULL)
1558			BO_UNLOCK(bo);
1559	}
1560	if (bp->b_qindex != QUEUE_NONE)
1561		panic("bqrelse: free buffer onto another queue???");
1562	/* buffers with stale but valid contents */
1563	if (bp->b_flags & B_DELWRI) {
1564		if (bp->b_flags & B_NEEDSGIANT)
1565			bp->b_qindex = QUEUE_DIRTY_GIANT;
1566		else
1567			bp->b_qindex = QUEUE_DIRTY;
1568		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1569	} else {
1570		/*
1571		 * The locking of the BO_LOCK for checking of the
1572		 * BV_BKGRDINPROG is not necessary since the
1573		 * BV_BKGRDINPROG cannot be set while we hold the buf
1574		 * lock, it can only be cleared if it is already
1575		 * pending.
1576		 */
1577		if (!buf_vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) {
1578			bp->b_qindex = QUEUE_CLEAN;
1579			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
1580			    b_freelist);
1581		} else {
1582			/*
1583			 * We are too low on memory, we have to try to free
1584			 * the buffer (most importantly: the wired pages
1585			 * making up its backing store) *now*.
1586			 */
1587			mtx_unlock(&bqlock);
1588			brelse(bp);
1589			return;
1590		}
1591	}
1592	mtx_unlock(&bqlock);
1593
1594	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
1595		if (bo != NULL)
1596			BO_LOCK(bo);
1597		bufcountwakeup(bp);
1598		if (bo != NULL)
1599			BO_UNLOCK(bo);
1600	}
1601
1602	/*
1603	 * Something we can maybe free or reuse.
1604	 */
1605	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1606		bufspacewakeup();
1607
1608	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1609	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1610		panic("bqrelse: not dirty");
1611	/* unlock */
1612	BUF_UNLOCK(bp);
1613}
1614
1615/* Give pages used by the bp back to the VM system (where possible) */
1616static void
1617vfs_vmio_release(struct buf *bp)
1618{
1619	int i;
1620	vm_page_t m;
1621
1622	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
1623	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
1624	for (i = 0; i < bp->b_npages; i++) {
1625		m = bp->b_pages[i];
1626		bp->b_pages[i] = NULL;
1627		/*
1628		 * In order to keep page LRU ordering consistent, put
1629		 * everything on the inactive queue.
1630		 */
1631		vm_page_lock(m);
1632		vm_page_unwire(m, 0);
1633		/*
1634		 * We don't mess with busy pages, it is
1635		 * the responsibility of the process that
1636		 * busied the pages to deal with them.
1637		 */
1638		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
1639		    m->wire_count == 0) {
1640			/*
1641			 * Might as well free the page if we can and it has
1642			 * no valid data.  We also free the page if the
1643			 * buffer was used for direct I/O
1644			 */
1645			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
1646				vm_page_free(m);
1647			} else if (bp->b_flags & B_DIRECT) {
1648				vm_page_try_to_free(m);
1649			} else if (buf_vm_page_count_severe()) {
1650				vm_page_try_to_cache(m);
1651			}
1652		}
1653		vm_page_unlock(m);
1654	}
1655	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
1656
1657	if (bp->b_bufsize) {
1658		bufspacewakeup();
1659		bp->b_bufsize = 0;
1660	}
1661	bp->b_npages = 0;
1662	bp->b_flags &= ~B_VMIO;
1663	if (bp->b_vp)
1664		brelvp(bp);
1665}
1666
1667/*
1668 * Check to see if a block at a particular lbn is available for a clustered
1669 * write.
1670 */
1671static int
1672vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
1673{
1674	struct buf *bpa;
1675	int match;
1676
1677	match = 0;
1678
1679	/* If the buf isn't in core skip it */
1680	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
1681		return (0);
1682
1683	/* If the buf is busy we don't want to wait for it */
1684	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1685		return (0);
1686
1687	/* Only cluster with valid clusterable delayed write buffers */
1688	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
1689	    (B_DELWRI | B_CLUSTEROK))
1690		goto done;
1691
1692	if (bpa->b_bufsize != size)
1693		goto done;
1694
1695	/*
1696	 * Check to see if it is in the expected place on disk and that the
1697	 * block has been mapped.
1698	 */
1699	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
1700		match = 1;
1701done:
1702	BUF_UNLOCK(bpa);
1703	return (match);
1704}
1705
1706/*
1707 *	vfs_bio_awrite:
1708 *
1709 *	Implement clustered async writes for clearing out B_DELWRI buffers.
1710 *	This is much better than the old way of writing only one buffer at
1711 *	a time.  Note that we may not be presented with the buffers in the
1712 *	correct order, so we search for the cluster in both directions.
1713 */
1714int
1715vfs_bio_awrite(struct buf *bp)
1716{
1717	struct bufobj *bo;
1718	int i;
1719	int j;
1720	daddr_t lblkno = bp->b_lblkno;
1721	struct vnode *vp = bp->b_vp;
1722	int ncl;
1723	int nwritten;
1724	int size;
1725	int maxcl;
1726
1727	bo = &vp->v_bufobj;
1728	/*
1729	 * right now we support clustered writing only to regular files.  If
1730	 * we find a clusterable block we could be in the middle of a cluster
1731	 * rather than at the beginning.
1732	 */
1733	if ((vp->v_type == VREG) &&
1734	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1735	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1736
1737		size = vp->v_mount->mnt_stat.f_iosize;
1738		maxcl = MAXPHYS / size;
1739
1740		BO_LOCK(bo);
1741		for (i = 1; i < maxcl; i++)
1742			if (vfs_bio_clcheck(vp, size, lblkno + i,
1743			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
1744				break;
1745
1746		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
1747			if (vfs_bio_clcheck(vp, size, lblkno - j,
1748			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
1749				break;
1750		BO_UNLOCK(bo);
1751		--j;
1752		ncl = i + j;
1753		/*
1754		 * this is a possible cluster write
1755		 */
1756		if (ncl != 1) {
1757			BUF_UNLOCK(bp);
1758			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
1759			return nwritten;
1760		}
1761	}
1762	bremfree(bp);
1763	bp->b_flags |= B_ASYNC;
1764	/*
1765	 * default (old) behavior, writing out only one block
1766	 *
1767	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1768	 */
1769	nwritten = bp->b_bufsize;
1770	(void) bwrite(bp);
1771
1772	return nwritten;
1773}
1774
1775/*
1776 *	getnewbuf:
1777 *
1778 *	Find and initialize a new buffer header, freeing up existing buffers
1779 *	in the bufqueues as necessary.  The new buffer is returned locked.
1780 *
1781 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1782 *	buffer away, the caller must set B_INVAL prior to calling brelse().
1783 *
1784 *	We block if:
1785 *		We have insufficient buffer headers
1786 *		We have insufficient buffer space
1787 *		buffer_map is too fragmented ( space reservation fails )
1788 *		If we have to flush dirty buffers ( but we try to avoid this )
1789 *
1790 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1791 *	Instead we ask the buf daemon to do it for us.  We attempt to
1792 *	avoid piecemeal wakeups of the pageout daemon.
1793 */
1794
1795static struct buf *
1796getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
1797    int gbflags)
1798{
1799	struct thread *td;
1800	struct buf *bp;
1801	struct buf *nbp;
1802	int defrag = 0;
1803	int nqindex;
1804	static int flushingbufs;
1805
1806	td = curthread;
1807	/*
1808	 * We can't afford to block since we might be holding a vnode lock,
1809	 * which may prevent system daemons from running.  We deal with
1810	 * low-memory situations by proactively returning memory and running
1811	 * async I/O rather than sync I/O.
1812	 */
1813	atomic_add_int(&getnewbufcalls, 1);
1814	atomic_subtract_int(&getnewbufrestarts, 1);
1815restart:
1816	atomic_add_int(&getnewbufrestarts, 1);
1817
1818	/*
1819	 * Setup for scan.  If we do not have enough free buffers,
1820	 * we set up a degenerate case that immediately fails.  Note
1821	 * that if we are a specially marked process, we are allowed to
1822	 * dip into our reserves.
1823	 *
1824	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
1825	 *
1826	 * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
1827	 * However, there are a number of cases (defragging, reusing, ...)
1828	 * where we cannot back up.
1829	 */
1830	mtx_lock(&bqlock);
1831	nqindex = QUEUE_EMPTYKVA;
1832	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1833
1834	if (nbp == NULL) {
1835		/*
1836		 * If no EMPTYKVA buffers and we are either
1837		 * defragging or reusing, locate a CLEAN buffer
1838		 * to free or reuse.  If bufspace usage is low
1839		 * skip this step so we can allocate a new buffer.
1840		 */
1841		if (defrag || bufspace >= lobufspace) {
1842			nqindex = QUEUE_CLEAN;
1843			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1844		}
1845
1846		/*
1847		 * If we could not find or were not allowed to reuse a
1848		 * CLEAN buffer, check to see if it is ok to use an EMPTY
1849		 * buffer.  We can only use an EMPTY buffer if allocating
1850		 * its KVA would not otherwise run us out of buffer space.
1851		 */
1852		if (nbp == NULL && defrag == 0 &&
1853		    bufspace + maxsize < hibufspace) {
1854			nqindex = QUEUE_EMPTY;
1855			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1856		}
1857	}
1858
1859	/*
1860	 * Run the scan, possibly freeing data and/or kva mappings on the
1861	 * fly, depending on which queue the buffer came from.
1862	 */
1863
1864	while ((bp = nbp) != NULL) {
1865		int qindex = nqindex;
1866
1867		/*
1868		 * Calculate next bp ( we can only use it if we do not block
1869		 * or do other fancy things ).
1870		 */
1871		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1872			switch(qindex) {
1873			case QUEUE_EMPTY:
1874				nqindex = QUEUE_EMPTYKVA;
1875				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1876					break;
1877				/* FALLTHROUGH */
1878			case QUEUE_EMPTYKVA:
1879				nqindex = QUEUE_CLEAN;
1880				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1881					break;
1882				/* FALLTHROUGH */
1883			case QUEUE_CLEAN:
1884				/*
1885				 * nbp is NULL.
1886				 */
1887				break;
1888			}
1889		}
1890		/*
1891		 * If we are defragging then we need a buffer with
1892		 * b_kvasize != 0.  XXX this situation should no longer
1893		 * occur, if defrag is non-zero the buffer's b_kvasize
1894		 * should also be non-zero at this point.  XXX
1895		 */
1896		if (defrag && bp->b_kvasize == 0) {
1897			printf("Warning: defrag empty buffer %p\n", bp);
1898			continue;
1899		}
1900
1901		/*
1902		 * Start freeing the bp.  This is somewhat involved.  nbp
1903		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1904		 */
1905		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1906			continue;
1907		if (bp->b_vp) {
1908			BO_LOCK(bp->b_bufobj);
1909			if (bp->b_vflags & BV_BKGRDINPROG) {
1910				BO_UNLOCK(bp->b_bufobj);
1911				BUF_UNLOCK(bp);
1912				continue;
1913			}
1914			BO_UNLOCK(bp->b_bufobj);
1915		}
1916		CTR6(KTR_BUF,
1917		    "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
1918		    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
1919		    bp->b_kvasize, bp->b_bufsize, qindex);
1920
1921		/*
1922		 * Sanity Checks
1923		 */
1924		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1925
1926		/*
1927		 * Note: we no longer distinguish between VMIO and non-VMIO
1928		 * buffers.
1929		 */
1930
1931		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1932
1933		if (bp->b_bufobj != NULL)
1934			BO_LOCK(bp->b_bufobj);
1935		bremfreel(bp);
1936		if (bp->b_bufobj != NULL)
1937			BO_UNLOCK(bp->b_bufobj);
1938		mtx_unlock(&bqlock);
1939
1940		if (qindex == QUEUE_CLEAN) {
1941			if (bp->b_flags & B_VMIO) {
1942				bp->b_flags &= ~B_ASYNC;
1943				vfs_vmio_release(bp);
1944			}
1945			if (bp->b_vp)
1946				brelvp(bp);
1947		}
1948
1949		/*
1950		 * NOTE:  nbp is now entirely invalid.  We can only restart
1951		 * the scan from this point on.
1952		 *
1953		 * Get the rest of the buffer freed up.  b_kva* is still
1954		 * valid after this operation.
1955		 */
1956
1957		if (bp->b_rcred != NOCRED) {
1958			crfree(bp->b_rcred);
1959			bp->b_rcred = NOCRED;
1960		}
1961		if (bp->b_wcred != NOCRED) {
1962			crfree(bp->b_wcred);
1963			bp->b_wcred = NOCRED;
1964		}
1965		if (!LIST_EMPTY(&bp->b_dep))
1966			buf_deallocate(bp);
1967		if (bp->b_vflags & BV_BKGRDINPROG)
1968			panic("losing buffer 3");
1969		KASSERT(bp->b_vp == NULL,
1970		    ("bp: %p still has vnode %p.  qindex: %d",
1971		    bp, bp->b_vp, qindex));
1972		KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1973		   ("bp: %p still on a buffer list. xflags %X",
1974		    bp, bp->b_xflags));
1975
1976		if (bp->b_bufsize)
1977			allocbuf(bp, 0);
1978
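		/*
		 * Scrub the buffer header back to a pristine state before
		 * it is handed out for reuse.
		 */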
1979		bp->b_flags = 0;
1980		bp->b_ioflags = 0;
1981		bp->b_xflags = 0;
1982		KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
1983		    ("buf %p still counted as free?", bp));
1984		bp->b_vflags = 0;
1985		bp->b_vp = NULL;
1986		bp->b_blkno = bp->b_lblkno = 0;
1987		bp->b_offset = NOOFFSET;
1988		bp->b_iodone = 0;
1989		bp->b_error = 0;
1990		bp->b_resid = 0;
1991		bp->b_bcount = 0;
1992		bp->b_npages = 0;
1993		bp->b_dirtyoff = bp->b_dirtyend = 0;
1994		bp->b_bufobj = NULL;
1995		bp->b_pin_count = 0;
1996		bp->b_fsprivate1 = NULL;
1997		bp->b_fsprivate2 = NULL;
1998		bp->b_fsprivate3 = NULL;
1999
2000		LIST_INIT(&bp->b_dep);
2001
2002		/*
2003		 * If we are defragging then free the buffer.
2004		 */
2005		if (defrag) {
2006			bp->b_flags |= B_INVAL;
2007			bfreekva(bp);
2008			brelse(bp);
2009			defrag = 0;
2010			goto restart;
2011		}
2012
2013		/*
2014		 * Notify any waiters for the buffer lock about
2015		 * identity change by freeing the buffer.
2016		 */
2017		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
2018			bp->b_flags |= B_INVAL;
2019			bfreekva(bp);
2020			brelse(bp);
2021			goto restart;
2022		}
2023
2024		/*
2025		 * If we are overcommitted then recover the buffer and its
2026		 * KVM space.  This occurs in rare situations when multiple
2027		 * processes are blocked in getnewbuf() or allocbuf().
2028		 */
2029		if (bufspace >= hibufspace)
2030			flushingbufs = 1;
2031		if (flushingbufs && bp->b_kvasize != 0) {
2032			bp->b_flags |= B_INVAL;
2033			bfreekva(bp);
2034			brelse(bp);
2035			goto restart;
2036		}
2037		if (bufspace < lobufspace)
2038			flushingbufs = 0;
2039		break;
2040	}
2041
2042	/*
2043	 * If we exhausted our list, sleep as appropriate.  We may have to
2044	 * wakeup various daemons and write out some dirty buffers.
2045	 *
2046	 * Generally we are sleeping due to insufficient buffer space.
2047	 */
2048
2049	if (bp == NULL) {
2050		int flags, norunbuf;
2051		char *waitmsg;
2052		int fl;
2053
2054		if (defrag) {
2055			flags = VFS_BIO_NEED_BUFSPACE;
2056			waitmsg = "nbufkv";
2057		} else if (bufspace >= hibufspace) {
2058			waitmsg = "nbufbs";
2059			flags = VFS_BIO_NEED_BUFSPACE;
2060		} else {
2061			waitmsg = "newbuf";
2062			flags = VFS_BIO_NEED_ANY;
2063		}
2064		mtx_lock(&nblock);
2065		needsbuffer |= flags;
2066		mtx_unlock(&nblock);
2067		mtx_unlock(&bqlock);
2068
2069		bd_speedup();	/* heeeelp */
2070		if (gbflags & GB_NOWAIT_BD)
2071			return (NULL);
2072
2073		mtx_lock(&nblock);
2074		while (needsbuffer & flags) {
2075			if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) {
2076				mtx_unlock(&nblock);
2077				/*
2078				 * getblk() is called with a vnode
2079				 * locked, and a majority of the
2080				 * dirty buffers may well belong to
2081				 * that vnode.  Flushing those
2082				 * buffers here makes progress that
2083				 * cannot be achieved by the
2084				 * buf_daemon, which cannot lock
2085				 * the vnode.
2086				 */
2087				norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2088				    (td->td_pflags & TDP_NORUNNINGBUF);
2089				/* play bufdaemon */
2090				td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2091				fl = buf_do_flush(vp);
2092				td->td_pflags &= norunbuf;
2093				mtx_lock(&nblock);
2094				if (fl != 0)
2095					continue;
2096				if ((needsbuffer & flags) == 0)
2097					break;
2098			}
2099			if (msleep(&needsbuffer, &nblock,
2100			    (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
2101				mtx_unlock(&nblock);
2102				return (NULL);
2103			}
2104		}
2105		mtx_unlock(&nblock);
2106	} else {
2107		/*
2108		 * We finally have a valid bp.  We aren't quite out of the
2109		 * woods, we still have to reserve kva space.  In order
2110		 * to keep fragmentation sane we only allocate kva in
2111		 * BKVASIZE chunks.
2112		 */
2113		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2114
2115		if (maxsize != bp->b_kvasize) {
2116			vm_offset_t addr = 0;
2117
2118			bfreekva(bp);
2119
2120			vm_map_lock(buffer_map);
2121			if (vm_map_findspace(buffer_map,
2122				vm_map_min(buffer_map), maxsize, &addr)) {
2123				/*
2124				 * Uh oh.  Buffer map is too fragmented.  We
2125				 * must defragment the map.
2126				 */
2127				atomic_add_int(&bufdefragcnt, 1);
2128				vm_map_unlock(buffer_map);
2129				defrag = 1;
2130				bp->b_flags |= B_INVAL;
2131				brelse(bp);
2132				goto restart;
2133			}
2134			if (addr) {
2135				vm_map_insert(buffer_map, NULL, 0,
2136					addr, addr + maxsize,
2137					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
2138
2139				bp->b_kvabase = (caddr_t) addr;
2140				bp->b_kvasize = maxsize;
2141				atomic_add_long(&bufspace, bp->b_kvasize);
2142				atomic_add_int(&bufreusecnt, 1);
2143			}
2144			vm_map_unlock(buffer_map);
2145		}
2146		bp->b_saveaddr = bp->b_kvabase;
2147		bp->b_data = bp->b_saveaddr;
2148	}
2149	return(bp);
2150}
2151
2152/*
2153 *	buf_daemon:
2154 *
2155 *	buffer flushing daemon.  Buffers are normally flushed by the
2156 *	update daemon but if it cannot keep up this process starts to
2157 *	take the load in an attempt to prevent getnewbuf() from blocking.
2158 */
2159
2160static struct kproc_desc buf_kp = {
2161	"bufdaemon",
2162	buf_daemon,
2163	&bufdaemonproc
2164};
2165SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2166
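/*
 * Flush dirty buffers from the dirty queues.  If a vnode is given, only
 * buffers belonging to that vnode are flushed.
 */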
2167static int
2168buf_do_flush(struct vnode *vp)
2169{
2170	int flushed;
2171
2172	flushed = flushbufqueues(vp, QUEUE_DIRTY, 0);
2173	/* The list empty check here is slightly racy */
2174	if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
2175		mtx_lock(&Giant);
2176		flushed += flushbufqueues(vp, QUEUE_DIRTY_GIANT, 0);
2177		mtx_unlock(&Giant);
2178	}
2179	if (flushed == 0) {
2180		/*
2181		 * Could not find any buffers without rollback
2182		 * dependencies, so just write the first one
2183		 * in the hopes of eventually making progress.
2184		 */
2185		flushbufqueues(vp, QUEUE_DIRTY, 1);
2186		if (!TAILQ_EMPTY(
2187			    &bufqueues[QUEUE_DIRTY_GIANT])) {
2188			mtx_lock(&Giant);
2189			flushbufqueues(vp, QUEUE_DIRTY_GIANT, 1);
2190			mtx_unlock(&Giant);
2191		}
2192	}
2193	return (flushed);
2194}
2195
2196static void
2197buf_daemon()
2198{
2199	int lodirtysave;
2200
2201	/*
2202	 * This process needs to be suspended prior to shutdown sync.
2203	 */
2204	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2205	    SHUTDOWN_PRI_LAST);
2206
2207	/*
2208	 * This process is allowed to take the buffer cache to the limit
2209	 */
2210	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2211	mtx_lock(&bdlock);
2212	for (;;) {
2213		bd_request = 0;
2214		mtx_unlock(&bdlock);
2215
2216		kproc_suspend_check(bufdaemonproc);
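		/*
		 * A speedup request lowers the flush target for this pass to
		 * half of the current dirty buffer count; the saved value is
		 * restored after the flush loop below.
		 */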
2217		lodirtysave = lodirtybuffers;
2218		if (bd_speedupreq) {
2219			lodirtybuffers = numdirtybuffers / 2;
2220			bd_speedupreq = 0;
2221		}
2222		/*
2223		 * Do the flush.  Limit the amount of in-transit I/O we
2224		 * allow to build up, otherwise we would completely saturate
2225		 * the I/O system.  Wakeup any waiting processes before we
2226		 * normally would so they can run in parallel with our drain.
2227		 */
2228		while (numdirtybuffers > lodirtybuffers) {
2229			if (buf_do_flush(NULL) == 0)
2230				break;
2231			kern_yield(PRI_UNCHANGED);
2232		}
2233		lodirtybuffers = lodirtysave;
2234
2235		/*
2236		 * Only clear bd_request if we have reached our low water
2237		 * mark.  The buf_daemon normally waits 1 second and
2238		 * then incrementally flushes any dirty buffers that have
2239		 * built up, within reason.
2240		 *
2241		 * If we were unable to hit our low water mark and couldn't
2242		 * find any flushable buffers, we sleep half a second.
2243		 * Otherwise we loop immediately.
2244		 */
2245		mtx_lock(&bdlock);
2246		if (numdirtybuffers <= lodirtybuffers) {
2247			/*
2248			 * We reached our low water mark, reset the
2249			 * request and sleep until we are needed again.
2250			 * The sleep is just so the suspend code works.
2251			 */
2252			bd_request = 0;
2253			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2254		} else {
2255			/*
2256			 * We couldn't find any flushable dirty buffers but
2257			 * still have too many dirty buffers, so we
2258			 * have to sleep and try again.  (rare)
2259			 */
2260			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2261		}
2262	}
2263}
2264
2265/*
2266 *	flushbufqueues:
2267 *
2268 *	Try to flush a buffer in the dirty queue.  We must be careful to
2269 *	free up B_INVAL buffers instead of writing them, which NFS is
2270 *	particularly sensitive to.
2271 */
2272static int flushwithdeps = 0;
2273SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2274    0, "Number of buffers flushed with dependencies that require rollbacks");
2275
2276static int
2277flushbufqueues(struct vnode *lvp, int queue, int flushdeps)
2278{
2279	struct buf *sentinel;
2280	struct vnode *vp;
2281	struct mount *mp;
2282	struct buf *bp;
2283	int hasdeps;
2284	int flushed;
2285	int target;
2286
2287	if (lvp == NULL) {
2288		target = numdirtybuffers - lodirtybuffers;
2289		if (flushdeps && target > 2)
2290			target /= 2;
2291	} else
2292		target = flushbufqtarget;
2293	flushed = 0;
2294	bp = NULL;
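	/*
	 * A sentinel buffer marks our position in the queue so that the
	 * scan can resume after bqlock is dropped around each flush.
	 */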
2295	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2296	sentinel->b_qindex = QUEUE_SENTINEL;
2297	mtx_lock(&bqlock);
2298	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2299	while (flushed != target) {
2300		bp = TAILQ_NEXT(sentinel, b_freelist);
2301		if (bp != NULL) {
2302			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2303			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2304			    b_freelist);
2305		} else
2306			break;
2307		/*
2308		 * Skip sentinels inserted by other invocations of the
2309		 * Skip sentinels inserted by other invocations of
2310		 * flushbufqueues(), taking care not to reorder them.
2311		if (bp->b_qindex == QUEUE_SENTINEL)
2312			continue;
2313		/*
2314		 * Only flush the buffers that belong to the
2315		 * vnode locked by the curthread.
2316		 */
2317		if (lvp != NULL && bp->b_vp != lvp)
2318			continue;
2319		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2320			continue;
2321		if (bp->b_pin_count > 0) {
2322			BUF_UNLOCK(bp);
2323			continue;
2324		}
2325		BO_LOCK(bp->b_bufobj);
2326		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2327		    (bp->b_flags & B_DELWRI) == 0) {
2328			BO_UNLOCK(bp->b_bufobj);
2329			BUF_UNLOCK(bp);
2330			continue;
2331		}
2332		BO_UNLOCK(bp->b_bufobj);
2333		if (bp->b_flags & B_INVAL) {
2334			bremfreel(bp);
2335			mtx_unlock(&bqlock);
2336			brelse(bp);
2337			flushed++;
2338			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2339			mtx_lock(&bqlock);
2340			continue;
2341		}
2342
2343		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2344			if (flushdeps == 0) {
2345				BUF_UNLOCK(bp);
2346				continue;
2347			}
2348			hasdeps = 1;
2349		} else
2350			hasdeps = 0;
2351		/*
2352		 * We must hold the lock on a vnode before writing
2353		 * one of its buffers. Otherwise we may confuse the
2354		 * system or, in the case of a snapshot vnode,
2355		 * deadlock it.
2356		 *
2357		 * The lock order here is the reverse of the normal
2358		 * order of vnode lock followed by buf lock.  This is
2359		 * ok because the NOWAIT will prevent deadlock.
2360		 */
2361		vp = bp->b_vp;
2362		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2363			BUF_UNLOCK(bp);
2364			continue;
2365		}
2366		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) {
2367			mtx_unlock(&bqlock);
2368			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2369			    bp, bp->b_vp, bp->b_flags);
2370			if (curproc == bufdaemonproc)
2371				vfs_bio_awrite(bp);
2372			else {
2373				bremfree(bp);
2374				bwrite(bp);
2375				notbufdflashes++;
2376			}
2377			vn_finished_write(mp);
2378			VOP_UNLOCK(vp, 0);
2379			flushwithdeps += hasdeps;
2380			flushed++;
2381
2382			/*
2383			 * Sleeping on runningbufspace while holding
2384			 * vnode lock leads to deadlock.
2385			 */
2386			if (curproc == bufdaemonproc)
2387				waitrunningbufspace();
2388			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2389			mtx_lock(&bqlock);
2390			continue;
2391		}
2392		vn_finished_write(mp);
2393		BUF_UNLOCK(bp);
2394	}
2395	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2396	mtx_unlock(&bqlock);
2397	free(sentinel, M_TEMP);
2398	return (flushed);
2399}
2400
2401/*
2402 * Check to see if a block is currently memory resident.
2403 */
2404struct buf *
2405incore(struct bufobj *bo, daddr_t blkno)
2406{
2407	struct buf *bp;
2408
2409	BO_LOCK(bo);
2410	bp = gbincore(bo, blkno);
2411	BO_UNLOCK(bo);
2412	return (bp);
2413}
2414
2415/*
2416 * Returns true if no I/O is needed to access the
2417 * associated VM object.  This is like incore except
2418 * it also hunts around in the VM system for the data.
2419 */
2420
2421static int
2422inmem(struct vnode * vp, daddr_t blkno)
2423{
2424	vm_object_t obj;
2425	vm_offset_t toff, tinc, size;
2426	vm_page_t m;
2427	vm_ooffset_t off;
2428
2429	ASSERT_VOP_LOCKED(vp, "inmem");
2430
2431	if (incore(&vp->v_bufobj, blkno))
2432		return 1;
2433	if (vp->v_mount == NULL)
2434		return 0;
2435	obj = vp->v_object;
2436	if (obj == NULL)
2437		return (0);
2438
2439	size = PAGE_SIZE;
2440	if (size > vp->v_mount->mnt_stat.f_iosize)
2441		size = vp->v_mount->mnt_stat.f_iosize;
2442	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2443
2444	VM_OBJECT_LOCK(obj);
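	/*
	 * Walk the block a chunk at a time; every covered byte must be
	 * resident and valid in the VM object for the block to be in memory.
	 */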
2445	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2446		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2447		if (!m)
2448			goto notinmem;
2449		tinc = size;
2450		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2451			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2452		if (vm_page_is_valid(m,
2453		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2454			goto notinmem;
2455	}
2456	VM_OBJECT_UNLOCK(obj);
2457	return 1;
2458
2459notinmem:
2460	VM_OBJECT_UNLOCK(obj);
2461	return (0);
2462}
2463
2464/*
2465 * Set the dirty range for a buffer based on the status of the dirty
2466 * bits in the pages comprising the buffer.  The range is limited
2467 * to the size of the buffer.
2468 *
2469 * Tell the VM system that the pages associated with this buffer
2470 * are clean.  This is used for delayed writes where the data is
2471 * going to go to disk eventually without additional VM intervention.
2472 *
2473 * Note that while we only really need to clean through to b_bcount, we
2474 * just go ahead and clean through to b_bufsize.
2475 */
2476static void
2477vfs_clean_pages_dirty_buf(struct buf *bp)
2478{
2479	vm_ooffset_t foff, noff, eoff;
2480	vm_page_t m;
2481	int i;
2482
2483	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
2484		return;
2485
2486	foff = bp->b_offset;
2487	KASSERT(bp->b_offset != NOOFFSET,
2488	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
2489
2490	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2491	vfs_drain_busy_pages(bp);
2492	vfs_setdirty_locked_object(bp);
2493	for (i = 0; i < bp->b_npages; i++) {
2494		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2495		eoff = noff;
2496		if (eoff > bp->b_offset + bp->b_bufsize)
2497			eoff = bp->b_offset + bp->b_bufsize;
2498		m = bp->b_pages[i];
2499		vfs_page_set_validclean(bp, foff, m);
2500		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2501		foff = noff;
2502	}
2503	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2504}
2505
2506static void
2507vfs_setdirty_locked_object(struct buf *bp)
2508{
2509	vm_object_t object;
2510	int i;
2511
2512	object = bp->b_bufobj->bo_object;
2513	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2514
2515	/*
2516	 * We qualify the scan for modified pages on whether the
2517	 * object has been flushed yet.
2518	 */
2519	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
2520		vm_offset_t boffset;
2521		vm_offset_t eoffset;
2522
2523		/*
2524		 * test the pages to see if they have been modified directly
2525		 * by users through the VM system.
2526		 */
2527		for (i = 0; i < bp->b_npages; i++)
2528			vm_page_test_dirty(bp->b_pages[i]);
2529
2530		/*
2531		 * Calculate the encompassing dirty range, boffset and eoffset,
2532		 * (eoffset - boffset) bytes.
2533		 */
2534
2535		for (i = 0; i < bp->b_npages; i++) {
2536			if (bp->b_pages[i]->dirty)
2537				break;
2538		}
2539		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2540
2541		for (i = bp->b_npages - 1; i >= 0; --i) {
2542			if (bp->b_pages[i]->dirty) {
2543				break;
2544			}
2545		}
2546		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2547
2548		/*
2549		 * Fit it to the buffer.
2550		 */
2551
2552		if (eoffset > bp->b_bcount)
2553			eoffset = bp->b_bcount;
2554
2555		/*
2556		 * If we have a good dirty range, merge with the existing
2557		 * dirty range.
2558		 */
2559
2560		if (boffset < eoffset) {
2561			if (bp->b_dirtyoff > boffset)
2562				bp->b_dirtyoff = boffset;
2563			if (bp->b_dirtyend < eoffset)
2564				bp->b_dirtyend = eoffset;
2565		}
2566	}
2567}
2568
2569/*
2570 *	getblk:
2571 *
2572 *	Get a block given a specified block and offset into a file/device.
2573 *	The buffer's B_DONE bit will be cleared on return, making it almost
2574 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2575 *	return.  The caller should clear B_INVAL prior to initiating a
2576 *	READ.
2577 *
2578 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2579 *	an existing buffer.
2580 *
2581 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2582 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2583 *	and then cleared based on the backing VM.  If the previous buffer is
2584 *	non-0-sized but invalid, B_CACHE will be cleared.
2585 *
2586 *	If getblk() must create a new buffer, the new buffer is returned with
2587 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2588 *	case it is returned with B_INVAL clear and B_CACHE set based on the
2589 *	backing VM.
2590 *
2591 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2592 *	B_CACHE bit is clear.
2593 *
2594 *	What this means, basically, is that the caller should use B_CACHE to
2595 *	determine whether the buffer is fully valid or not and should clear
2596 *	B_INVAL prior to issuing a read.  If the caller intends to validate
2597 *	the buffer by loading its data area with something, the caller needs
2598 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2599 *	the caller should set B_CACHE ( as an optimization ), else the caller
2600 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2601 *	a write attempt or if it was a successful read.  If the caller
2602 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2603 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2604 */
2605struct buf *
2606getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
2607    int flags)
2608{
2609	struct buf *bp;
2610	struct bufobj *bo;
2611	int error;
2612
2613	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
2614	ASSERT_VOP_LOCKED(vp, "getblk");
2615	if (size > MAXBSIZE)
2616		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2617
2618	bo = &vp->v_bufobj;
2619loop:
2620	/*
2621	 * Block if we are low on buffers.  Certain processes are allowed
2622	 * to completely exhaust the buffer cache.
2623	 *
2624	 * If this check ever becomes a bottleneck it may be better to
2625	 * move it into the else, when gbincore() fails.  At the moment
2626	 * it isn't a problem.
2627	 *
2628	 * XXX remove if 0 sections (clean this up after it's proven)
2629	 */
2630	if (numfreebuffers == 0) {
2631		if (TD_IS_IDLETHREAD(curthread))
2632			return NULL;
2633		mtx_lock(&nblock);
2634		needsbuffer |= VFS_BIO_NEED_ANY;
2635		mtx_unlock(&nblock);
2636	}
2637
2638	BO_LOCK(bo);
2639	bp = gbincore(bo, blkno);
2640	if (bp != NULL) {
2641		int lockflags;
2642		/*
2643		 * Buffer is in-core.  If the buffer is neither busy nor managed,
2644		 * it must be on a queue.
2645		 */
2646		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
2647
2648		if (flags & GB_LOCK_NOWAIT)
2649			lockflags |= LK_NOWAIT;
2650
2651		error = BUF_TIMELOCK(bp, lockflags,
2652		    BO_MTX(bo), "getblk", slpflag, slptimeo);
2653
2654		/*
2655		 * If we slept and got the lock we have to restart in case
2656		 * the buffer changed identities.
2657		 */
2658		if (error == ENOLCK)
2659			goto loop;
2660		/* We timed out or were interrupted. */
2661		else if (error)
2662			return (NULL);
2663
2664		/*
2665		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2666		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2667		 * and for a VMIO buffer B_CACHE is adjusted according to the
2668		 * backing VM cache.
2669		 */
2670		if (bp->b_flags & B_INVAL)
2671			bp->b_flags &= ~B_CACHE;
2672		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2673			bp->b_flags |= B_CACHE;
2674		if (bp->b_flags & B_MANAGED)
2675			MPASS(bp->b_qindex == QUEUE_NONE);
2676		else {
2677			BO_LOCK(bo);
2678			bremfree(bp);
2679			BO_UNLOCK(bo);
2680		}
2681
2682		/*
2683		 * check for size inconsistencies for the non-VMIO case.
2684		 */
2685
2686		if (bp->b_bcount != size) {
2687			if ((bp->b_flags & B_VMIO) == 0 ||
2688			    (size > bp->b_kvasize)) {
2689				if (bp->b_flags & B_DELWRI) {
2690					/*
2691					 * If buffer is pinned and caller does
2692					 * not want to sleep waiting for it to
2693					 * be unpinned, bail out.
2694					 */
2695					if (bp->b_pin_count > 0) {
2696						if (flags & GB_LOCK_NOWAIT) {
2697							bqrelse(bp);
2698							return (NULL);
2699						} else {
2700							bunpin_wait(bp);
2701						}
2702					}
2703					bp->b_flags |= B_NOCACHE;
2704					bwrite(bp);
2705				} else {
2706					if (LIST_EMPTY(&bp->b_dep)) {
2707						bp->b_flags |= B_RELBUF;
2708						brelse(bp);
2709					} else {
2710						bp->b_flags |= B_NOCACHE;
2711						bwrite(bp);
2712					}
2713				}
2714				goto loop;
2715			}
2716		}
2717
2718		/*
2719		 * If the size is inconsistent in the VMIO case, we can resize
2720		 * the buffer.  This might lead to B_CACHE getting set or
2721		 * cleared.  If the size has not changed, B_CACHE remains
2722		 * unchanged from its previous state.
2723		 */
2724
2725		if (bp->b_bcount != size)
2726			allocbuf(bp, size);
2727
2728		KASSERT(bp->b_offset != NOOFFSET,
2729		    ("getblk: no buffer offset"));
2730
2731		/*
2732		 * A buffer with B_DELWRI set and B_CACHE clear must
2733		 * be committed before we can return the buffer in
2734		 * order to prevent the caller from issuing a read
2735		 * ( due to B_CACHE not being set ) and overwriting
2736		 * it.
2737		 *
2738		 * Most callers, including NFS and FFS, need this to
2739		 * operate properly either because they assume they
2740		 * can issue a read if B_CACHE is not set, or because
2741		 * ( for example ) an uncached B_DELWRI might loop due
2742		 * to softupdates re-dirtying the buffer.  In the latter
2743		 * case, B_CACHE is set after the first write completes,
2744		 * preventing further loops.
2745		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2746		 * above while extending the buffer, we cannot allow the
2747		 * buffer to remain with B_CACHE set after the write
2748		 * completes or it will represent a corrupt state.  To
2749		 * deal with this we set B_NOCACHE to scrap the buffer
2750		 * after the write.
2751		 *
2752		 * We might be able to do something fancy, like setting
2753		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2754		 * so the below call doesn't set B_CACHE, but that gets real
2755		 * confusing.  This is much easier.
2756		 */
2757
2758		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2759			bp->b_flags |= B_NOCACHE;
2760			bwrite(bp);
2761			goto loop;
2762		}
2763		bp->b_flags &= ~B_DONE;
2764	} else {
2765		int bsize, maxsize, vmio;
2766		off_t offset;
2767
2768		/*
2769		 * Buffer is not in-core, create new buffer.  The buffer
2770		 * returned by getnewbuf() is locked.  Note that the returned
2771		 * buffer is also considered valid (not marked B_INVAL).
2772		 */
2773		BO_UNLOCK(bo);
2774		/*
2775		 * If the user does not want us to create the buffer, bail out
2776		 * here.
2777		 */
2778		if (flags & GB_NOCREAT)
2779			return NULL;
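		/*
		 * Compute the block's byte offset and the KVA the new buffer
		 * will need; a VMIO buffer is padded so that a page-unaligned
		 * offset still fits within the mapping.
		 */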
2780		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
2781		offset = blkno * bsize;
2782		vmio = vp->v_object != NULL;
2783		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2784		maxsize = imax(maxsize, bsize);
2785
2786		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
2787		if (bp == NULL) {
2788			if (slpflag || slptimeo)
2789				return NULL;
2790			goto loop;
2791		}
2792
2793		/*
2794		 * This code is used to make sure that a buffer is not
2795		 * created while the getnewbuf routine is blocked.
2796		 * This can be a problem whether the vnode is locked or not.
2797		 * If the buffer is created out from under us, we have to
2798		 * throw away the one we just created.
2799		 *
2800		 * Note: this must occur before we associate the buffer
2801		 * with the vp especially considering limitations in
2802		 * the splay tree implementation when dealing with duplicate
2803		 * lblkno's.
2804		 */
2805		BO_LOCK(bo);
2806		if (gbincore(bo, blkno)) {
2807			BO_UNLOCK(bo);
2808			bp->b_flags |= B_INVAL;
2809			brelse(bp);
2810			goto loop;
2811		}
2812
2813		/*
2814		 * Insert the buffer into the hash, so that it can
2815		 * be found by incore.
2816		 */
2817		bp->b_blkno = bp->b_lblkno = blkno;
2818		bp->b_offset = offset;
2819		bgetvp(vp, bp);
2820		BO_UNLOCK(bo);
2821
2822		/*
2823		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2824		 * buffer size starts out as 0, B_CACHE will be set by
2825		 * allocbuf() for the VMIO case prior to it testing the
2826		 * backing store for validity.
2827		 */
2828
2829		if (vmio) {
2830			bp->b_flags |= B_VMIO;
2831			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
2832			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
2833			    bp, vp->v_object, bp->b_bufobj->bo_object));
2834		} else {
2835			bp->b_flags &= ~B_VMIO;
2836			KASSERT(bp->b_bufobj->bo_object == NULL,
2837			    ("ARGH! has b_bufobj->bo_object %p %p\n",
2838			    bp, bp->b_bufobj->bo_object));
2839		}
2840
2841		allocbuf(bp, size);
2842		bp->b_flags &= ~B_DONE;
2843	}
2844	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
2845	BUF_ASSERT_HELD(bp);
2846	KASSERT(bp->b_bufobj == bo,
2847	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2848	return (bp);
2849}
2850
2851/*
2852 * Get an empty, disassociated buffer of given size.  The buffer is initially
2853 * set to B_INVAL.
2854 */
2855struct buf *
2856geteblk(int size, int flags)
2857{
2858	struct buf *bp;
2859	int maxsize;
2860
2861	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2862	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
2863		if ((flags & GB_NOWAIT_BD) &&
2864		    (curthread->td_pflags & TDP_BUFNEED) != 0)
2865			return (NULL);
2866	}
2867	allocbuf(bp, size);
2868	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2869	BUF_ASSERT_HELD(bp);
2870	return (bp);
2871}
2872
2873
2874/*
2875 * This code constitutes the buffer memory from either anonymous system
2876 * memory (in the case of non-VMIO operations) or from an associated
2877 * VM object (in the case of VMIO operations).  This code is able to
2878 * resize a buffer up or down.
2879 *
2880 * Note that this code is tricky, and has many complications to resolve
2881 * deadlock or inconsistent data situations.  Tread lightly!!!
2882 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2883 * the caller.  Calling this code willy nilly can result in the loss of data.
2884 *
2885 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2886 * B_CACHE for the non-VMIO case.
2887 */
2888
2889int
2890allocbuf(struct buf *bp, int size)
2891{
2892	int newbsize, mbsize;
2893	int i;
2894
2895	BUF_ASSERT_HELD(bp);
2896
2897	if (bp->b_kvasize < size)
2898		panic("allocbuf: buffer too small");
2899
2900	if ((bp->b_flags & B_VMIO) == 0) {
2901		caddr_t origbuf;
2902		int origbufsize;
2903		/*
2904		 * Just get anonymous memory from the kernel.  Don't
2905		 * mess with B_CACHE.
2906		 */
2907		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2908		if (bp->b_flags & B_MALLOC)
2909			newbsize = mbsize;
2910		else
2911			newbsize = round_page(size);
2912
2913		if (newbsize < bp->b_bufsize) {
2914			/*
2915			 * malloced buffers are not shrunk
2916			 */
2917			if (bp->b_flags & B_MALLOC) {
2918				if (newbsize) {
2919					bp->b_bcount = size;
2920				} else {
2921					free(bp->b_data, M_BIOBUF);
2922					if (bp->b_bufsize) {
2923						atomic_subtract_long(
2924						    &bufmallocspace,
2925						    bp->b_bufsize);
2926						bufspacewakeup();
2927						bp->b_bufsize = 0;
2928					}
2929					bp->b_saveaddr = bp->b_kvabase;
2930					bp->b_data = bp->b_saveaddr;
2931					bp->b_bcount = 0;
2932					bp->b_flags &= ~B_MALLOC;
2933				}
2934				return 1;
2935			}
2936			vm_hold_free_pages(bp, newbsize);
2937		} else if (newbsize > bp->b_bufsize) {
2938			/*
2939			 * We only use malloced memory on the first allocation,
2940			 * and revert to page-allocated memory when the buffer
2941			 * grows.
2942			 */
2943			/*
2944			 * There is a potential smp race here that could lead
2945			 * to bufmallocspace slightly passing the max.  It
2946			 * is probably extremely rare and not worth worrying
2947			 * over.
2948			 */
2949			if ( (bufmallocspace < maxbufmallocspace) &&
2950				(bp->b_bufsize == 0) &&
2951				(mbsize <= PAGE_SIZE/2)) {
2952
2953				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2954				bp->b_bufsize = mbsize;
2955				bp->b_bcount = size;
2956				bp->b_flags |= B_MALLOC;
2957				atomic_add_long(&bufmallocspace, mbsize);
2958				return 1;
2959			}
2960			origbuf = NULL;
2961			origbufsize = 0;
2962			/*
2963			 * If the buffer is growing on its other-than-first allocation,
2964			 * then we revert to the page-allocation scheme.
2965			 */
2966			if (bp->b_flags & B_MALLOC) {
2967				origbuf = bp->b_data;
2968				origbufsize = bp->b_bufsize;
2969				bp->b_data = bp->b_kvabase;
2970				if (bp->b_bufsize) {
2971					atomic_subtract_long(&bufmallocspace,
2972					    bp->b_bufsize);
2973					bufspacewakeup();
2974					bp->b_bufsize = 0;
2975				}
2976				bp->b_flags &= ~B_MALLOC;
2977				newbsize = round_page(newbsize);
2978			}
2979			vm_hold_load_pages(
2980			    bp,
2981			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2982			    (vm_offset_t) bp->b_data + newbsize);
2983			if (origbuf) {
2984				bcopy(origbuf, bp->b_data, origbufsize);
2985				free(origbuf, M_BIOBUF);
2986			}
2987		}
2988	} else {
2989		int desiredpages;
2990
2991		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2992		desiredpages = (size == 0) ? 0 :
2993			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2994
2995		if (bp->b_flags & B_MALLOC)
2996			panic("allocbuf: VMIO buffer can't be malloced");
2997		/*
2998		 * Set B_CACHE initially if buffer is 0 length or will become
2999		 * 0-length.
3000		 */
3001		if (size == 0 || bp->b_bufsize == 0)
3002			bp->b_flags |= B_CACHE;
3003
3004		if (newbsize < bp->b_bufsize) {
3005			/*
3006			 * DEV_BSIZE aligned new buffer size is less than the
3007			 * DEV_BSIZE aligned existing buffer size.  Figure out
3008			 * if we have to remove any pages.
3009			 */
3010			if (desiredpages < bp->b_npages) {
3011				vm_page_t m;
3012
3013				pmap_qremove((vm_offset_t)trunc_page(
3014				    (vm_offset_t)bp->b_data) +
3015				    (desiredpages << PAGE_SHIFT),
3016				    (bp->b_npages - desiredpages));
3017				VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3018				for (i = desiredpages; i < bp->b_npages; i++) {
3019					/*
3020					 * the page is not freed here -- it
3021					 * is the responsibility of
3022					 * vnode_pager_setsize
3023					 */
3024					m = bp->b_pages[i];
3025					KASSERT(m != bogus_page,
3026					    ("allocbuf: bogus page found"));
3027					while (vm_page_sleep_if_busy(m, TRUE,
3028					    "biodep"))
3029						continue;
3030
3031					bp->b_pages[i] = NULL;
3032					vm_page_lock(m);
3033					vm_page_unwire(m, 0);
3034					vm_page_unlock(m);
3035				}
3036				VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3037				bp->b_npages = desiredpages;
3038			}
3039		} else if (size > bp->b_bcount) {
3040			/*
3041			 * We are growing the buffer, possibly in a
3042			 * byte-granular fashion.
3043			 */
3044			vm_object_t obj;
3045			vm_offset_t toff;
3046			vm_offset_t tinc;
3047
3048			/*
3049			 * Step 1, bring in the VM pages from the object,
3050			 * allocating them if necessary.  We must clear
3051			 * B_CACHE if these pages are not valid for the
3052			 * range covered by the buffer.
3053			 */
3054
3055			obj = bp->b_bufobj->bo_object;
3056
3057			VM_OBJECT_LOCK(obj);
3058			while (bp->b_npages < desiredpages) {
3059				vm_page_t m;
3060
3061				/*
3062				 * We must allocate system pages since blocking
3063				 * here could interfere with paging I/O, no
3064				 * matter which process we are.
3065				 *
3066				 * We can only test VPO_BUSY here.  Blocking on
3067				 * m->busy might lead to a deadlock:
3068				 *  vm_fault->getpages->cluster_read->allocbuf
3069				 * Thus, we specify VM_ALLOC_IGN_SBUSY.
3070				 */
3071				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
3072				    bp->b_npages, VM_ALLOC_NOBUSY |
3073				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
3074				    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
3075				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
3076				if (m->valid == 0)
3077					bp->b_flags &= ~B_CACHE;
3078				bp->b_pages[bp->b_npages] = m;
3079				++bp->b_npages;
3080			}
3081
3082			/*
3083			 * Step 2.  We've loaded the pages into the buffer,
3084			 * we have to figure out if we can still have B_CACHE
3085			 * set.  Note that B_CACHE is set according to the
3086			 * byte-granular range ( bcount and size ), not the
3087			 * aligned range ( newbsize ).
3088			 *
3089			 * The VM test is against m->valid, which is DEV_BSIZE
3090			 * aligned.  Needless to say, the validity of the data
3091			 * needs to also be DEV_BSIZE aligned.  Note that this
3092			 * fails with NFS if the server or some other client
3093			 * extends the file's EOF.  If our buffer is resized,
3094			 * B_CACHE may remain set! XXX
3095			 */
3096
3097			toff = bp->b_bcount;
3098			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3099
3100			while ((bp->b_flags & B_CACHE) && toff < size) {
3101				vm_pindex_t pi;
3102
3103				if (tinc > (size - toff))
3104					tinc = size - toff;
3105
3106				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3107				    PAGE_SHIFT;
3108
3109				vfs_buf_test_cache(
3110				    bp,
3111				    bp->b_offset,
3112				    toff,
3113				    tinc,
3114				    bp->b_pages[pi]
3115				);
3116				toff += tinc;
3117				tinc = PAGE_SIZE;
3118			}
3119			VM_OBJECT_UNLOCK(obj);
3120
3121			/*
3122			 * Step 3, fixup the KVM pmap.  Remember that
3123			 * bp->b_data is relative to bp->b_offset, but
3124			 * bp->b_offset may be offset into the first page.
3125			 */
3126
3127			bp->b_data = (caddr_t)
3128			    trunc_page((vm_offset_t)bp->b_data);
3129			pmap_qenter(
3130			    (vm_offset_t)bp->b_data,
3131			    bp->b_pages,
3132			    bp->b_npages
3133			);
3134
3135			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
3136			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
3137		}
3138	}
3139	if (newbsize < bp->b_bufsize)
3140		bufspacewakeup();
3141	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
3142	bp->b_bcount = size;		/* requested buffer size	*/
3143	return 1;
3144}
3145
3146void
3147biodone(struct bio *bp)
3148{
3149	struct mtx *mtxp;
3150	void (*done)(struct bio *);
3151
3152	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3153	mtx_lock(mtxp);
3154	bp->bio_flags |= BIO_DONE;
3155	done = bp->bio_done;
3156	if (done == NULL)
3157		wakeup(bp);
3158	mtx_unlock(mtxp);
3159	if (done != NULL)
3160		done(bp);
3161}
3162
3163/*
3164 * Wait for a BIO to finish.
3165 *
3166 * XXX: resort to a timeout for now.  The optimal locking (if any) for this
3167 * case is not yet clear.
3168 */
3169int
3170biowait(struct bio *bp, const char *wchan)
3171{
3172	struct mtx *mtxp;
3173
3174	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3175	mtx_lock(mtxp);
3176	while ((bp->bio_flags & BIO_DONE) == 0)
3177		msleep(bp, mtxp, PRIBIO, wchan, hz / 10);
3178	mtx_unlock(mtxp);
3179	if (bp->bio_error != 0)
3180		return (bp->bio_error);
3181	if (!(bp->bio_flags & BIO_ERROR))
3182		return (0);
3183	return (EIO);
3184}
3185
3186void
3187biofinish(struct bio *bp, struct devstat *stat, int error)
3188{
3189
3190	if (error) {
3191		bp->bio_error = error;
3192		bp->bio_flags |= BIO_ERROR;
3193	}
3194	if (stat != NULL)
3195		devstat_end_transaction_bio(stat, bp);
3196	biodone(bp);
3197}
3198
3199/*
3200 *	bufwait:
3201 *
3202 *	Wait for buffer I/O completion, returning error status.  The buffer
3203 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3204 *	error and cleared.
3205 */
3206int
3207bufwait(struct buf *bp)
3208{
3209	if (bp->b_iocmd == BIO_READ)
3210		bwait(bp, PRIBIO, "biord");
3211	else
3212		bwait(bp, PRIBIO, "biowr");
3213	if (bp->b_flags & B_EINTR) {
3214		bp->b_flags &= ~B_EINTR;
3215		return (EINTR);
3216	}
3217	if (bp->b_ioflags & BIO_ERROR) {
3218		return (bp->b_error ? bp->b_error : EIO);
3219	} else {
3220		return (0);
3221	}
3222}
3223
3224 /*
3225  * Call back function from struct bio back up to struct buf.
3226  */
3227static void
3228bufdonebio(struct bio *bip)
3229{
3230	struct buf *bp;
3231
3232	bp = bip->bio_caller2;
3233	bp->b_resid = bp->b_bcount - bip->bio_completed;
3234	bp->b_resid = bip->bio_resid;	/* XXX: remove */
3235	bp->b_ioflags = bip->bio_flags;
3236	bp->b_error = bip->bio_error;
3237	if (bp->b_error)
3238		bp->b_ioflags |= BIO_ERROR;
3239	bufdone(bp);
3240	g_destroy_bio(bip);
3241}
3242
3243void
3244dev_strategy(struct cdev *dev, struct buf *bp)
3245{
3246	struct cdevsw *csw;
3247	struct bio *bip;
3248	int ref;
3249
3250	if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
3251		panic("b_iocmd botch");
3252	for (;;) {
3253		bip = g_new_bio();
3254		if (bip != NULL)
3255			break;
3256		/* Try again later */
3257		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3258	}
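	/*
	 * Translate the buf request into a bio and hand it to the device's
	 * strategy routine.
	 */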
3259	bip->bio_cmd = bp->b_iocmd;
3260	bip->bio_offset = bp->b_iooffset;
3261	bip->bio_length = bp->b_bcount;
3262	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3263	bip->bio_data = bp->b_data;
3264	bip->bio_done = bufdonebio;
3265	bip->bio_caller2 = bp;
3266	bip->bio_dev = dev;
3267	KASSERT(dev->si_refcount > 0,
3268	    ("dev_strategy on un-referenced struct cdev *(%s)",
3269	    devtoname(dev)));
3270	csw = dev_refthread(dev, &ref);
3271	if (csw == NULL) {
3272		g_destroy_bio(bip);
3273		bp->b_error = ENXIO;
3274		bp->b_ioflags = BIO_ERROR;
3275		bufdone(bp);
3276		return;
3277	}
3278	(*csw->d_strategy)(bip);
3279	dev_relthread(dev, ref);
3280}
3281
3282/*
3283 *	bufdone:
3284 *
3285 *	Finish I/O on a buffer, optionally calling a completion function.
3286 *	This is usually called from an interrupt so process blocking is
3287 *	not allowed.
3288 *
3289 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3290 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3291 *	assuming B_INVAL is clear.
3292 *
3293 *	For the VMIO case, we set B_CACHE if the op was a read and no
3294 *	read error occurred, or if the op was a write.  B_CACHE is never
3295 *	set if the buffer is invalid or otherwise uncacheable.
3296 *
3297 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3298 *	initiator to leave B_INVAL set to brelse the buffer out of existence
3299 *	in the biodone routine.
3300 */
3301void
3302bufdone(struct buf *bp)
3303{
3304	struct bufobj *dropobj;
3305	void    (*biodone)(struct buf *);
3306
3307	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3308	dropobj = NULL;
3309
3310	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3311	BUF_ASSERT_HELD(bp);
3312
3313	runningbufwakeup(bp);
3314	if (bp->b_iocmd == BIO_WRITE)
3315		dropobj = bp->b_bufobj;
3316	/* call optional completion function if requested */
3317	if (bp->b_iodone != NULL) {
3318		biodone = bp->b_iodone;
3319		bp->b_iodone = NULL;
3320		(*biodone) (bp);
3321		if (dropobj)
3322			bufobj_wdrop(dropobj);
3323		return;
3324	}
3325
3326	bufdone_finish(bp);
3327
3328	if (dropobj)
3329		bufobj_wdrop(dropobj);
3330}
3331
3332void
3333bufdone_finish(struct buf *bp)
3334{
3335	BUF_ASSERT_HELD(bp);
3336
3337	if (!LIST_EMPTY(&bp->b_dep))
3338		buf_complete(bp);
3339
3340	if (bp->b_flags & B_VMIO) {
3341		vm_ooffset_t foff;
3342		vm_page_t m;
3343		vm_object_t obj;
3344		struct vnode *vp;
3345		int bogus, i, iosize;
3346
3347		obj = bp->b_bufobj->bo_object;
3348		KASSERT(obj->paging_in_progress >= bp->b_npages,
3349		    ("biodone_finish: paging in progress(%d) < b_npages(%d)",
3350		    obj->paging_in_progress, bp->b_npages));
3351
3352		vp = bp->b_vp;
3353		KASSERT(vp->v_holdcnt > 0,
3354		    ("biodone_finish: vnode %p has zero hold count", vp));
3355		KASSERT(vp->v_object != NULL,
3356		    ("biodone_finish: vnode %p has no vm_object", vp));
3357
3358		foff = bp->b_offset;
3359		KASSERT(bp->b_offset != NOOFFSET,
3360		    ("biodone_finish: bp %p has no buffer offset", bp));
3361
3362		/*
3363		 * Set B_CACHE if the op was a normal read and no error
3364		 * occurred.  B_CACHE is set for writes in the b*write()
3365		 * routines.
3366		 */
3367		iosize = bp->b_bcount - bp->b_resid;
3368		if (bp->b_iocmd == BIO_READ &&
3369		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3370		    !(bp->b_ioflags & BIO_ERROR)) {
3371			bp->b_flags |= B_CACHE;
3372		}
3373		bogus = 0;
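		/*
		 * Walk the buffer's pages: restore any bogus_page
		 * substitutions, mark freshly read data valid, and finish
		 * the per-page I/O state.
		 */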
3374		VM_OBJECT_LOCK(obj);
3375		for (i = 0; i < bp->b_npages; i++) {
3376			int bogusflag = 0;
3377			int resid;
3378
3379			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3380			if (resid > iosize)
3381				resid = iosize;
3382
3383			/*
3384			 * cleanup bogus pages, restoring the originals
3385			 */
3386			m = bp->b_pages[i];
3387			if (m == bogus_page) {
3388				bogus = bogusflag = 1;
3389				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3390				if (m == NULL)
3391					panic("biodone: page disappeared!");
3392				bp->b_pages[i] = m;
3393			}
3394			KASSERT(OFF_TO_IDX(foff) == m->pindex,
3395			    ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
3396			    (intmax_t)foff, (uintmax_t)m->pindex));
3397
3398			/*
3399			 * In the write case, the valid and clean bits are
3400			 * already changed correctly ( see bdwrite() ), so we
3401			 * only need to do this here in the read case.
3402			 */
3403			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3404				KASSERT((m->dirty & vm_page_bits(foff &
3405				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3406				    " page %p has unexpected dirty bits", m));
3407				vfs_page_set_valid(bp, foff, m);
3408			}
3409
3410			vm_page_io_finish(m);
3411			vm_object_pip_subtract(obj, 1);
3412			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3413			iosize -= resid;
3414		}
3415		vm_object_pip_wakeupn(obj, 0);
3416		VM_OBJECT_UNLOCK(obj);
3417		if (bogus)
3418			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3419			    bp->b_pages, bp->b_npages);
3420	}
3421
3422	/*
3423	 * For asynchronous completions, release the buffer now. The brelse
3424	 * will do a wakeup there if necessary - so no need to do a wakeup
3425	 * here in the async case. The sync case always needs to do a wakeup.
3426	 */
3427
3428	if (bp->b_flags & B_ASYNC) {
3429		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3430			brelse(bp);
3431		else
3432			bqrelse(bp);
3433	} else
3434		bdone(bp);
3435}
3436
3437/*
3438 * This routine is called in lieu of iodone in the case of
3439 * incomplete I/O.  This keeps the busy status for pages
3440 * consistent.
3441 */
3442void
3443vfs_unbusy_pages(struct buf *bp)
3444{
3445	int i;
3446	vm_object_t obj;
3447	vm_page_t m;
3448
3449	runningbufwakeup(bp);
3450	if (!(bp->b_flags & B_VMIO))
3451		return;
3452
3453	obj = bp->b_bufobj->bo_object;
3454	VM_OBJECT_LOCK(obj);
3455	for (i = 0; i < bp->b_npages; i++) {
3456		m = bp->b_pages[i];
3457		if (m == bogus_page) {
3458			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3459			if (!m)
3460				panic("vfs_unbusy_pages: page missing\n");
3461			bp->b_pages[i] = m;
3462			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3463			    bp->b_pages, bp->b_npages);
3464		}
3465		vm_object_pip_subtract(obj, 1);
3466		vm_page_io_finish(m);
3467	}
3468	vm_object_pip_wakeupn(obj, 0);
3469	VM_OBJECT_UNLOCK(obj);
3470}
3471
3472/*
3473 * vfs_page_set_valid:
3474 *
3475 *	Set the valid bits in a page based on the supplied offset.   The
3476 *	range is restricted to the buffer's size.
3477 *
3478 *	This routine is typically called after a read completes.
3479 */
3480static void
3481vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3482{
3483	vm_ooffset_t eoff;
3484
3485	/*
3486	 * Compute the end offset, eoff, such that [off, eoff) does not span a
3487	 * page boundary and eoff is not greater than the end of the buffer.
3488	 * The end of the buffer, in this case, is our file EOF, not the
3489	 * allocation size of the buffer.
3490	 */
3491	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
3492	if (eoff > bp->b_offset + bp->b_bcount)
3493		eoff = bp->b_offset + bp->b_bcount;
3494
3495	/*
3496	 * Set valid range.  This is typically the entire buffer and thus the
3497	 * entire page.
3498	 */
3499	if (eoff > off)
3500		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
3501}
3502
3503/*
3504 * vfs_page_set_validclean:
3505 *
3506 *	Set the valid bits and clear the dirty bits in a page based on the
3507 *	supplied offset.   The range is restricted to the buffer's size.
3508 */
3509static void
3510vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3511{
3512	vm_ooffset_t soff, eoff;
3513
3514	/*
3515	 * Start and end offsets in buffer.  eoff - soff may not cross a
3516	 * page boundary or cross the end of the buffer.  The end of the
3517	 * buffer, in this case, is our file EOF, not the allocation size
3518	 * of the buffer.
3519	 */
3520	soff = off;
3521	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3522	if (eoff > bp->b_offset + bp->b_bcount)
3523		eoff = bp->b_offset + bp->b_bcount;
3524
3525	/*
3526	 * Set valid range.  This is typically the entire buffer and thus the
3527	 * entire page.
3528	 */
3529	if (eoff > soff) {
3530		vm_page_set_validclean(
3531		    m,
3532		   (vm_offset_t) (soff & PAGE_MASK),
3533		   (vm_offset_t) (eoff - soff)
3534		);
3535	}
3536}
3537
3538/*
3539 * Ensure that no buffer page is busied via the VPO_BUSY flag.  If
3540 * any page is busy, wait for the flag to drain.
3541 */
3542static void
3543vfs_drain_busy_pages(struct buf *bp)
3544{
3545	vm_page_t m;
3546	int i, last_busied;
3547
3548	VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
3549	last_busied = 0;
3550	for (i = 0; i < bp->b_npages; i++) {
3551		m = bp->b_pages[i];
3552		if ((m->oflags & VPO_BUSY) != 0) {
3553			for (; last_busied < i; last_busied++)
3554				vm_page_busy(bp->b_pages[last_busied]);
3555			while ((m->oflags & VPO_BUSY) != 0)
3556				vm_page_sleep(m, "vbpage");
3557		}
3558	}
3559	for (i = 0; i < last_busied; i++)
3560		vm_page_wakeup(bp->b_pages[i]);
3561}
3562
3563/*
3564 * This routine is called before a device strategy routine.
3565 * It is used to tell the VM system that paging I/O is in
3566 * progress, and treat the pages associated with the buffer
3567 * almost as being VPO_BUSY.  Also the object paging_in_progress
3568 * flag is handled to make sure that the object doesn't become
3569 * inconsistent.
3570 *
3571 * Since I/O has not been initiated yet, certain buffer flags
3572 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3573 * and should be ignored.
3574 */
3575void
3576vfs_busy_pages(struct buf *bp, int clear_modify)
3577{
3578	int i, bogus;
3579	vm_object_t obj;
3580	vm_ooffset_t foff;
3581	vm_page_t m;
3582
3583	if (!(bp->b_flags & B_VMIO))
3584		return;
3585
3586	obj = bp->b_bufobj->bo_object;
3587	foff = bp->b_offset;
3588	KASSERT(bp->b_offset != NOOFFSET,
3589	    ("vfs_busy_pages: no buffer offset"));
3590	VM_OBJECT_LOCK(obj);
3591	vfs_drain_busy_pages(bp);
3592	if (bp->b_bufsize != 0)
3593		vfs_setdirty_locked_object(bp);
3594	bogus = 0;
3595	for (i = 0; i < bp->b_npages; i++) {
3596		m = bp->b_pages[i];
3597
3598		if ((bp->b_flags & B_CLUSTER) == 0) {
3599			vm_object_pip_add(obj, 1);
3600			vm_page_io_start(m);
3601		}
3602		/*
3603		 * When readying a buffer for a read ( i.e
3604		 * clear_modify == 0 ), it is important to do
3605		 * bogus_page replacement for valid pages in
3606		 * partially instantiated buffers.  Partially
3607		 * instantiated buffers can, in turn, occur when
3608		 * reconstituting a buffer from its VM backing store
3609		 * base.  We only have to do this if B_CACHE is
3610		 * clear ( which causes the I/O to occur in the
3611		 * first place ).  The replacement prevents the read
3612		 * I/O from overwriting potentially dirty VM-backed
3613		 * pages.  XXX bogus page replacement is, uh, bogus.
3614		 * It may not work properly with small-block devices.
3615		 * We need to find a better way.
3616		 */
3617		if (clear_modify) {
3618			pmap_remove_write(m);
3619			vfs_page_set_validclean(bp, foff, m);
3620		} else if (m->valid == VM_PAGE_BITS_ALL &&
3621		    (bp->b_flags & B_CACHE) == 0) {
3622			bp->b_pages[i] = bogus_page;
3623			bogus++;
3624		}
3625		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3626	}
3627	VM_OBJECT_UNLOCK(obj);
3628	if (bogus)
3629		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3630		    bp->b_pages, bp->b_npages);
3631}
3632
3633/*
3634 *	vfs_bio_set_valid:
3635 *
3636 *	Set the range within the buffer to valid.  The range is
3637 *	relative to the beginning of the buffer, b_offset.  Note that
3638 *	b_offset itself may be offset from the beginning of the first
3639 *	page.
3640 */
3641void
3642vfs_bio_set_valid(struct buf *bp, int base, int size)
3643{
3644	int i, n;
3645	vm_page_t m;
3646
3647	if (!(bp->b_flags & B_VMIO))
3648		return;
3649
3650	/*
3651	 * Fixup base to be relative to beginning of first page.
3652	 * Set initial n to be the maximum number of bytes in the
3653	 * first page that can be validated.
3654	 */
3655	base += (bp->b_offset & PAGE_MASK);
3656	n = PAGE_SIZE - (base & PAGE_MASK);
3657
3658	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3659	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3660		m = bp->b_pages[i];
3661		if (n > size)
3662			n = size;
3663		vm_page_set_valid_range(m, base & PAGE_MASK, n);
3664		base += n;
3665		size -= n;
3666		n = PAGE_SIZE;
3667	}
3668	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3669}
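/*
 * Editorial example (hypothetical values, derived from the loop above):
 * with PAGE_SIZE == 4096 and b_offset == 0x1800, a call
 * vfs_bio_set_valid(bp, 0, 0x1000) adjusts base to 0x800, so the first
 * pass validates bytes 0x800-0xfff of page 0 and the second pass
 * validates bytes 0x000-0x7ff of page 1.
 */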
3670
3671/*
3672 *	vfs_bio_clrbuf:
3673 *
3674 *	If the specified buffer is a non-VMIO buffer, clear the entire
3675 *	buffer.  If the specified buffer is a VMIO buffer, clear and
3676 *	validate only the previously invalid portions of the buffer.
3677 *	This routine essentially fakes an I/O, so we need to clear
3678 *	BIO_ERROR and B_INVAL.
3679 *
3680 *	Note that while we only theoretically need to clear through b_bcount,
3681 *	we go ahead and clear through b_bufsize.
3682 */
3683void
3684vfs_bio_clrbuf(struct buf *bp)
3685{
3686	int i, j, mask;
3687	caddr_t sa, ea;
3688
3689	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
3690		clrbuf(bp);
3691		return;
3692	}
3693	bp->b_flags &= ~B_INVAL;
3694	bp->b_ioflags &= ~BIO_ERROR;
3695	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3696	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3697	    (bp->b_offset & PAGE_MASK) == 0) {
3698		if (bp->b_pages[0] == bogus_page)
3699			goto unlock;
3700		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3701		VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
3702		if ((bp->b_pages[0]->valid & mask) == mask)
3703			goto unlock;
3704		if ((bp->b_pages[0]->valid & mask) == 0) {
3705			bzero(bp->b_data, bp->b_bufsize);
3706			bp->b_pages[0]->valid |= mask;
3707			goto unlock;
3708		}
3709	}
3710	ea = sa = bp->b_data;
3711	for(i = 0; i < bp->b_npages; i++, sa = ea) {
3712		ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3713		ea = (caddr_t)(vm_offset_t)ulmin(
3714		    (u_long)(vm_offset_t)ea,
3715		    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3716		if (bp->b_pages[i] == bogus_page)
3717			continue;
3718		j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3719		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3720		VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
3721		if ((bp->b_pages[i]->valid & mask) == mask)
3722			continue;
3723		if ((bp->b_pages[i]->valid & mask) == 0)
3724			bzero(sa, ea - sa);
3725		else {
3726			for (; sa < ea; sa += DEV_BSIZE, j++) {
3727				if ((bp->b_pages[i]->valid & (1 << j)) == 0)
3728					bzero(sa, DEV_BSIZE);
3729			}
3730		}
3731		bp->b_pages[i]->valid |= mask;
3732	}
3733unlock:
3734	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3735	bp->b_resid = 0;
3736}
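/*
 * Editorial example (hypothetical values): for a single-page buffer with
 * b_bufsize == 2048 and DEV_BSIZE == 512, the fast path above computes
 * mask = (1 << 4) - 1 == 0x0f.  If all four 512-byte blocks are already
 * valid nothing is cleared, if none are the whole buffer is zeroed, and
 * partially valid buffers fall through to the per-block loop.
 */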
3737
3738/*
3739 * vm_hold_load_pages and vm_hold_free_pages get pages into
3740 * a buffers address space.  The pages are anonymous and are
3741 * not associated with a file object.
3742 */
3743static void
3744vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3745{
3746	vm_offset_t pg;
3747	vm_page_t p;
3748	int index;
3749
3750	to = round_page(to);
3751	from = round_page(from);
3752	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3753
3754	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3755tryagain:
3756		/*
3757		 * note: we must allocate system pages since blocking
3758		 * here could interfere with paging I/O, no matter
3759		 * which process context we are in.
3760		 */
3761		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
3762		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
3763		if (p == NULL) {
3764			VM_WAIT;
3765			goto tryagain;
3766		}
3767		pmap_qenter(pg, &p, 1);
3768		bp->b_pages[index] = p;
3769	}
3770	bp->b_npages = index;
3771}
3772
3773/* Return pages associated with this buf to the vm system */
3774static void
3775vm_hold_free_pages(struct buf *bp, int newbsize)
3776{
3777	vm_offset_t from;
3778	vm_page_t p;
3779	int index, newnpages;
3780
3781	from = round_page((vm_offset_t)bp->b_data + newbsize);
3782	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3783	if (bp->b_npages > newnpages)
3784		pmap_qremove(from, bp->b_npages - newnpages);
3785	for (index = newnpages; index < bp->b_npages; index++) {
3786		p = bp->b_pages[index];
3787		bp->b_pages[index] = NULL;
3788		if (p->busy != 0)
3789			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
3790			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
3791		p->wire_count--;
3792		vm_page_free(p);
3793		atomic_subtract_int(&cnt.v_wire_count, 1);
3794	}
3795	bp->b_npages = newnpages;
3796}
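/*
 * Editorial sketch (assumed pairing, not shown in this excerpt): for
 * non-VMIO buffers, allocbuf() is the expected consumer of these helpers,
 * keeping the two calls balanced along the lines of
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t)bp->b_data + desiredsize);	when growing
 *	vm_hold_free_pages(bp, desiredsize);		when shrinking
 *
 * where desiredsize is a stand-in for the new buffer size.
 */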
3797
3798/*
3799 * Map an IO request into kernel virtual address space.
3800 *
3801 * All requests are (re)mapped into kernel VA space.
3802 * Notice that we use b_bufsize for the size of the buffer
3803 * to be mapped.  b_bcount might be modified by the driver.
3804 *
3805 * Note that even if the caller determines that the address space should
3806 * be valid, a race or a smaller-file mapped into a larger space may
3807 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
3808 * check the return value.
3809 */
3810int
3811vmapbuf(struct buf *bp)
3812{
3813	caddr_t kva;
3814	vm_prot_t prot;
3815	int pidx;
3816
3817	if (bp->b_bufsize < 0)
3818		return (-1);
3819	prot = VM_PROT_READ;
3820	if (bp->b_iocmd == BIO_READ)
3821		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
3822	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
3823	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
3824	    btoc(MAXPHYS))) < 0)
3825		return (-1);
3826	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
3827
3828	kva = bp->b_saveaddr;
3829	bp->b_npages = pidx;
3830	bp->b_saveaddr = bp->b_data;
3831	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
3832	return(0);
3833}
3834
3835/*
3836 * Free the I/O map PTEs associated with this I/O operation.
3837 * We also invalidate the TLB entries and restore the original b_data.
3838 */
3839void
3840vunmapbuf(struct buf *bp)
3841{
3842	int npages;
3843
3844	npages = bp->b_npages;
3845	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
3846	vm_page_unhold_pages(bp->b_pages, npages);
3847
3848	bp->b_data = bp->b_saveaddr;
3849}
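/*
 * Editorial sketch (illustrative, physio-style use): callers that map a
 * user buffer for raw I/O are expected to bracket the transfer as
 *
 *	if (vmapbuf(bp) < 0)
 *		return (EFAULT);
 *	... issue the transfer and wait for it to complete ...
 *	vunmapbuf(bp);
 *
 * Every successful vmapbuf() must be matched by a vunmapbuf() so that the
 * held pages and the temporary kernel mapping are released.
 */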
3850
3851void
3852bdone(struct buf *bp)
3853{
3854	struct mtx *mtxp;
3855
3856	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3857	mtx_lock(mtxp);
3858	bp->b_flags |= B_DONE;
3859	wakeup(bp);
3860	mtx_unlock(mtxp);
3861}
3862
3863void
3864bwait(struct buf *bp, u_char pri, const char *wchan)
3865{
3866	struct mtx *mtxp;
3867
3868	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3869	mtx_lock(mtxp);
3870	while ((bp->b_flags & B_DONE) == 0)
3871		msleep(bp, mtxp, pri, wchan, 0);
3872	mtx_unlock(mtxp);
3873}
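/*
 * Editorial sketch: bdone() and bwait() implement a minimal completion
 * handshake for buffers that bypass bufdone()/bufwait(), e.g. raw
 * transfers.  A hypothetical consumer looks like
 *
 *	bp->b_flags &= ~B_DONE;
 *	... start I/O whose completion handler calls bdone(bp) ...
 *	bwait(bp, PRIBIO, "iowait");
 *
 * where "iowait" is just an illustrative wait-channel name.
 */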
3874
3875int
3876bufsync(struct bufobj *bo, int waitfor)
3877{
3878
3879	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
3880}
3881
3882void
3883bufstrategy(struct bufobj *bo, struct buf *bp)
3884{
3885	int i = 0;
3886	struct vnode *vp;
3887
3888	vp = bp->b_vp;
3889	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
3890	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
3891	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
3892	i = VOP_STRATEGY(vp, bp);
3893	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
3894}
3895
3896void
3897bufobj_wrefl(struct bufobj *bo)
3898{
3899
3900	KASSERT(bo != NULL, ("NULL bo in bufobj_wrefl"));
3901	ASSERT_BO_LOCKED(bo);
3902	bo->bo_numoutput++;
3903}
3904
3905void
3906bufobj_wref(struct bufobj *bo)
3907{
3908
3909	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
3910	BO_LOCK(bo);
3911	bo->bo_numoutput++;
3912	BO_UNLOCK(bo);
3913}
3914
3915void
3916bufobj_wdrop(struct bufobj *bo)
3917{
3918
3919	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
3920	BO_LOCK(bo);
3921	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
3922	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
3923		bo->bo_flag &= ~BO_WWAIT;
3924		wakeup(&bo->bo_numoutput);
3925	}
3926	BO_UNLOCK(bo);
3927}
3928
3929int
3930bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
3931{
3932	int error;
3933
3934	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
3935	ASSERT_BO_LOCKED(bo);
3936	error = 0;
3937	while (bo->bo_numoutput) {
3938		bo->bo_flag |= BO_WWAIT;
3939		error = msleep(&bo->bo_numoutput, BO_MTX(bo),
3940		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
3941		if (error)
3942			break;
3943	}
3944	return (error);
3945}
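/*
 * Editorial sketch of the expected protocol: writers account for in-flight
 * output on a bufobj with bufobj_wref() (or bufobj_wrefl() with the lock
 * already held) and bufobj_wdrop(), while code that needs to drain the
 * object blocks in bufobj_wwait() with the bufobj lock held:
 *
 *	bufobj_wref(bo);		before starting an async write
 *	...				bufobj_wdrop(bo) runs at completion
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);	sleep until bo_numoutput drains
 *	BO_UNLOCK(bo);
 */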
3946
3947void
3948bpin(struct buf *bp)
3949{
3950	struct mtx *mtxp;
3951
3952	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3953	mtx_lock(mtxp);
3954	bp->b_pin_count++;
3955	mtx_unlock(mtxp);
3956}
3957
3958void
3959bunpin(struct buf *bp)
3960{
3961	struct mtx *mtxp;
3962
3963	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3964	mtx_lock(mtxp);
3965	if (--bp->b_pin_count == 0)
3966		wakeup(bp);
3967	mtx_unlock(mtxp);
3968}
3969
3970void
3971bunpin_wait(struct buf *bp)
3972{
3973	struct mtx *mtxp;
3974
3975	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3976	mtx_lock(mtxp);
3977	while (bp->b_pin_count > 0)
3978		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
3979	mtx_unlock(mtxp);
3980}
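/*
 * Editorial sketch: b_pin_count lets a subsystem keep a buffer from being
 * reused while it still holds a reference to it.  The assumed pattern is
 *
 *	bpin(bp);		take an additional pin on the buffer
 *	...			buffer must not be reclaimed in here
 *	bunpin(bp);		release it, waking any bunpin_wait() caller
 */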
3981
3982#include "opt_ddb.h"
3983#ifdef DDB
3984#include <ddb/ddb.h>
3985
3986/* DDB command to show buffer data */
3987DB_SHOW_COMMAND(buffer, db_show_buffer)
3988{
3989	/* get args */
3990	struct buf *bp = (struct buf *)addr;
3991
3992	if (!have_addr) {
3993		db_printf("usage: show buffer <addr>\n");
3994		return;
3995	}
3996
3997	db_printf("buf at %p\n", bp);
3998	db_printf("b_flags = 0x%b b_xflags=0x%b b_vflags=0x%b\n",
3999	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4000	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4001	db_printf(
4002	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4003	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4004	    "b_dep = %p\n",
4005	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4006	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4007	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4008	if (bp->b_npages) {
4009		int i;
4010		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4011		for (i = 0; i < bp->b_npages; i++) {
4012			vm_page_t m;
4013			m = bp->b_pages[i];
4014			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4015			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4016			if ((i + 1) < bp->b_npages)
4017				db_printf(",");
4018		}
4019		db_printf("\n");
4020	}
4021	db_printf(" ");
4022	BUF_LOCKPRINTINFO(bp);
4023}
4024
4025DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4026{
4027	struct buf *bp;
4028	int i;
4029
4030	for (i = 0; i < nbuf; i++) {
4031		bp = &buf[i];
4032		if (BUF_ISLOCKED(bp)) {
4033			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4034			db_printf("\n");
4035		}
4036	}
4037}
4038
4039DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4040{
4041	struct vnode *vp;
4042	struct buf *bp;
4043
4044	if (!have_addr) {
4045		db_printf("usage: show vnodebufs <addr>\n");
4046		return;
4047	}
4048	vp = (struct vnode *)addr;
4049	db_printf("Clean buffers:\n");
4050	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4051		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4052		db_printf("\n");
4053	}
4054	db_printf("Dirty buffers:\n");
4055	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4056		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4057		db_printf("\n");
4058	}
4059}
4060
4061DB_COMMAND(countfreebufs, db_countfreebufs)
4062{
4063	struct buf *bp;
4064	int i, used = 0, nfree = 0;
4065
4066	if (have_addr) {
4067		db_printf("usage: countfreebufs\n");
4068		return;
4069	}
4070
4071	for (i = 0; i < nbuf; i++) {
4072		bp = &buf[i];
4073		if ((bp->b_vflags & BV_INFREECNT) != 0)
4074			nfree++;
4075		else
4076			used++;
4077	}
4078
4079	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4080	    nfree + used);
4081	db_printf("numfreebuffers is %d\n", numfreebuffers);
4082}
4083#endif /* DDB */
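/*
 * Editorial examples (hypothetical addresses): the DDB commands defined
 * above are entered at the ddb prompt as, e.g.,
 *
 *	show buffer 0xc4a1b000
 *	show lockedbufs
 *	show vnodebufs 0xc4a2c3d8
 *	countfreebufs
 */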
4084