vfs_bio.c revision 175294
/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file implements a buffer I/O scheme providing a coherent view
 * between the VM object and the buffer cache.  Pains have been taken to
 * make sure that the performance degradation associated with schemes
 * such as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See man buf(9) for more info.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 175294 2008-01-13 14:44:15Z attilio $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include "opt_directio.h"
#include "opt_swap.h"

static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");

struct	bio_ops bioops;		/* I/O operation notification */

struct	buf_ops buf_ops_bio = {
	.bop_name	=	"buf_ops_bio",
	.bop_write	=	bufwrite,
	.bop_strategy	=	bufstrategy,
	.bop_sync	=	bufsync,
	.bop_bdflush	=	bufbdflush,
};

/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */

static struct proc *bufdaemonproc;

static int inmem(struct vnode *vp, daddr_t blkno);
static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
		vm_page_t m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_setdirty_locked_object(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
static int flushbufqueues(int, int);
static void buf_daemon(void);
static void bremfreel(struct buf *bp);

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
int runningbufspace;
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer I/O");
static int bufspace;
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "KVA memory used for bufs");
static int maxbufspace;
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static int bufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static int maxbufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static int lobufspace;
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffer space we want to have");
int hibufspace;
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static int lorunningspace;
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static int hirunningspace;
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
int dirtybufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
int bdwriteskip;
SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
int altbufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
    0, "Number of fsync flushes to limit dirty buffers");
static int recursiveflushes;
SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
    0, "Number of flushes skipped due to being recursive");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
int dirtybufthresh;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
   "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
   "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
   "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * This lock synchronizes access to bd_request.
 */
static struct mtx bdlock;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * This lock protects runningbufreq and synchronizes runningbufwakeup() and
 * waitrunningbufspace().
 */
static struct mtx rbreqlock;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting and clearing
 * is done by AND'ing and OR'ing.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
 */
static struct mtx nblock;
/*
 * Lock that protects against bwait()/bdone()/B_DONE races.
 */

static struct mtx bdonelock;

/*
 * Lock that protects against bpin()/bunpin() races on b_pin_count.
 */
static struct mtx bpinlock;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
#define QUEUE_DIRTY_GIANT 3	/* B_DELWRI buffers that need giant */
#define QUEUE_EMPTYKVA	4	/* empty buffer headers w/KVA assignment */
#define QUEUE_EMPTY	5	/* empty buffer headers */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };

/* Lock for the bufqueues */
static struct mtx bqlock;

/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referred to from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */
/*
 *	numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */

static __inline void
numdirtywakeup(int level)
{

	if (numdirtybuffers <= level) {
		mtx_lock(&nblock);
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);
		}
		mtx_unlock(&nblock);
	}
}

/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{

	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	mtx_lock(&nblock);
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 * runningbufwakeup() - in-progress I/O accounting.
 */
void
runningbufwakeup(struct buf *bp)
{

	if (bp->b_runningbufspace) {
		atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
		bp->b_runningbufspace = 0;
		mtx_lock(&rbreqlock);
		if (runningbufreq && runningbufspace <= lorunningspace) {
			runningbufreq = 0;
			wakeup(&runningbufreq);
		}
		mtx_unlock(&rbreqlock);
	}
}

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{

	atomic_add_int(&numfreebuffers, 1);
	mtx_lock(&nblock);
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 *	waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	Reads will adjust runningbufspace, but will not block based on it.
 *	The read load has a side effect of reducing the allowed write load.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
void
waitrunningbufspace(void)
{

	mtx_lock(&rbreqlock);
	while (runningbufspace > hirunningspace) {
		++runningbufreq;
		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
	}
	mtx_unlock(&rbreqlock);
}
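
/*
 * Illustrative sketch (not a compiled example; it restates what the
 * bufwrite() code later in this file actually does).  The throttling
 * protocol pairs a charge made by the async writer with a credit from
 * the completion path:
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 *	bstrategy(bp);
 *	waitrunningbufspace();		(sleeps while over hirunningspace)
 *
 * and on I/O completion the space is returned and sleepers are woken:
 *
 *	runningbufwakeup(bp);
 */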

/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/* Wake up the buffer daemon if necessary */
static __inline
void
bd_wakeup(int dirtybuflevel)
{

	mtx_lock(&bdlock);
	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
		bd_request = 1;
		wakeup(&bd_request);
	}
	mtx_unlock(&bdlock);
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */

static __inline
void
bd_speedup(void)
{

	bd_wakeup(1);
}

/*
 * Calculate buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low-level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
{
	int maxbuf;

	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += (physmem_est - 65536) * 2 / (factor * 5);

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;

		/* XXX Avoid integer overflows later on with maxbufspace. */
		maxbuf = (INT_MAX / 3) / BKVASIZE;
		if (nbuf > maxbuf)
			nbuf = maxbuf;
	}

#if 0
	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}
#endif

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);
#ifdef NSWBUF_MIN
	if (nswbuf < NSWBUF_MIN)
		nswbuf = NSWBUF_MIN;
#endif
#ifdef DIRECTIO
	ffs_rawread_setup();
#endif

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	return(v);
}
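
/*
 * Worked example (illustrative only; assumes BKVASIZE == 16K, so
 * factor == 64): on a machine where physmem_est works out to 262144KB
 * (256MB), the auto-sizing above gives
 *
 *	nbuf = 50
 *	     + min((262144 - 4096) / 64, 65536 / 64)	-> + 1024
 *	     + (262144 - 65536) * 2 / (64 * 5)		-> + 1228
 *	     = 2302 buffers,
 *
 * i.e. roughly 36MB of buffer KVA before the maxbcache clamp is applied.
 */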

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
	mtx_init(&bdonelock, "bdone lock", NULL, MTX_DEF);
	mtx_init(&bpinlock, "bpin lock", NULL, MTX_DEF);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vflags = 0;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = nbuf * BKVASIZE;
	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;

	lorunningspace = 512 * 1024;
	hirunningspace = 1024 * 1024;

/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	hidirtybuffers = nbuf / 4 + 20;
	dirtybufthresh = hidirtybuffers * 9 / 10;
	numdirtybuffers = 0;
/*
 * To support extreme low-memory systems, make sure hidirtybuffers cannot
 * eat up all available buffer space.  This occurs when our minimum cannot
 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
 * BKVASIZE'd (8K) buffers.
 */
	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;

/*
 * Try to keep the number of free buffers in the specified range,
 * and give special processes (e.g. like buf_daemon) access to an
 * emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

/*
 * Maximum number of async ops initiated per buf_daemon loop.  This is
 * somewhat of a hack at the moment, we really need to limit ourselves
 * based on the number of bytes of I/O in-transit that were initiated
 * from buf_daemon.
 */

	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}
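
/*
 * Continuing the sizing example above (illustrative; assumes nbuf == 2302
 * and BKVASIZE == 16K, with MAXBSIZE == 64K):
 *
 *	maxbufspace = 2302 * 16384			~ 36.0MB
 *	hibufspace  = imax(3 * maxbufspace / 4,
 *			   maxbufspace - 10 * MAXBSIZE)	~ 35.3MB
 *	lobufspace  = hibufspace - MAXBSIZE		~ 35.3MB - 64K
 *
 * so buf_daemon retains roughly 640K of headroom that ordinary processes
 * may not consume while they sleep waiting for buffer space.
 */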

/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf *bp)
{

	if (bp->b_kvasize) {
		atomic_add_int(&buffreekvacnt, 1);
		atomic_subtract_int(&bufspace, bp->b_kvasize);
		vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Mark the buffer for removal from the appropriate free list in brelse.
 */
void
bremfree(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfree: buffer %p not on a queue.", bp));

	bp->b_flags |= B_REMFREE;
	/* Fixup numfreebuffers count.  */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);
}

/*
 *	bremfreef:
 *
 *	Force an immediate removal from a free list.  Used only in nfs when
 *	it abuses the b_freelist pointer.
 */
void
bremfreef(struct buf *bp)
{
	mtx_lock(&bqlock);
	bremfreel(bp);
	mtx_unlock(&bqlock);
}

/*
 *	bremfreel:
 *
 *	Removes a buffer from the free list, must be called with the
 *	bqlock held.
 */
static void
bremfreel(struct buf *bp)
{
	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(BUF_REFCNT(bp), ("bremfreel: buffer %p not locked.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfreel: buffer %p not on a queue.", bp));
	mtx_assert(&bqlock, MA_OWNED);

	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
	bp->b_qindex = QUEUE_NONE;
	/*
	 * If this was a delayed bremfree() we only need to remove the buffer
	 * from the queue and return; the stats are already done.
	 */
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
		return;
	}
	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, the buffer was free and we must decrement
	 * numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).  This is really just a special case of breadn().
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf **bpp)
{

	return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
}
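
/*
 * Illustrative usage sketch (hypothetical caller; "vp", "lblkno", "bsize"
 * and "error" are assumed locals, not part of this file): the classic
 * synchronous read idiom is
 *
 *	struct buf *bp;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(consume bp->b_data, then release for likely reuse:)
 *	bqrelse(bp);
 */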

/*
 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 * the buffer is valid and we do not have to do anything.
 */
void
breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
    int cnt, struct ucred * cred)
{
	struct buf *rabp;
	int i;

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			rabp->b_iooffset = dbtob(rabp->b_blkno);
			bstrategy(rabp);
		} else {
			brelse(rabp);
		}
	}
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf **bpp)
{
	struct buf *bp;
	int rv = 0, readwait = 0;

	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
	*bpp = bp = getblk(vp, blkno, size, 0, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (!TD_IS_IDLETHREAD(curthread))
			curthread->td_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		++readwait;
	}

	breada(vp, rablkno, rabsize, cnt, cred);

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}
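
/*
 * Illustrative usage sketch (hypothetical values, not this file's API):
 * a filesystem read loop can ask for the next logical block to be read
 * ahead while the current one is fetched, e.g.
 *
 *	daddr_t nextlbn = lbn + 1;
 *	int nextsize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &nextlbn, &nextsize, 1, NOCRED, &bp);
 *
 * Only the primary buffer is returned to the caller; the read-ahead
 * buffer is started B_ASYNC and released by the completion path.
 */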

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
int
bufwrite(struct buf *bp)
{
	int oldflags;
	struct vnode *vp;
	int vp_md;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

	if (BUF_REFCNT(bp) == 0)
		panic("bufwrite: buffer is not busy???");

	if (bp->b_pin_count > 0)
		bunpin_wait(bp);

	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
	    ("FFS background buffer should not get here %p", bp));

	vp = bp->b_vp;
	if (vp)
		vp_md = vp->v_vflag & VV_MD;
	else
		vp_md = 0;

	/* Mark the buffer clean */
	bundirty(bp);

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bufobj_wref(bp->b_bufobj);
	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_int(&runningbufspace, bp->b_runningbufspace);

	if (!TD_IS_IDLETHREAD(curthread))
		curthread->td_ru.ru_oublock++;
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	} else {
		/*
		 * don't allow the async write to saturate the I/O
		 * system.  We will not deadlock here because
		 * we are blocking waiting for I/O that is already in-progress
		 * to complete. We do not block here if it is the update
		 * or syncer daemon trying to clean up as that can lead
		 * to deadlock.
		 */
		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
			waitrunningbufspace();
	}

	return (0);
}

void
bufbdflush(struct bufobj *bo, struct buf *bp)
{
	struct buf *nbp;

	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
		altbufferflushes++;
	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
		BO_LOCK(bo);
		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    BUF_LOCK(nbp,
				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
				continue;
			if (bp == nbp)
				panic("bdwrite: found ourselves");
			BO_UNLOCK(bo);
			/* Don't call buf_countdeps() with the bo lock held. */
			if (buf_countdeps(nbp, 0)) {
				BO_LOCK(bo);
				BUF_UNLOCK(nbp);
				continue;
			}
			if (nbp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(nbp);
			} else {
				bremfree(nbp);
				bawrite(nbp);
			}
			dirtybufferflushes++;
			break;
		}
		if (nbp == NULL)
			BO_UNLOCK(bo);
	}
}

/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct bufobj *bo;

	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}

	/*
	 * If we have too many dirty buffers, don't create any more.
	 * If we are wildly over our limit, then force a complete
	 * cleanup. Otherwise, just keep the situation from getting
	 * out of control. Note that we have to avoid a recursive
	 * disaster and not try to clean up after our own cleanup!
	 */
	vp = bp->b_vp;
	bo = bp->b_bufobj;
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		BO_BDFLUSH(bo, bp);
		td->td_pflags &= ~TDP_INBDFLUSH;
	} else
		recursiveflushes++;

	bdirty(bp);
	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it is
	 * a good thing to do this.  Note also, that if the pageout daemon
	 * is requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * Wakeup the buffer flushing daemon if we have a lot of dirty
	 * buffers (midpoint between our recovery point and our stall
	 * point).
	 */
	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}
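
/*
 * Illustrative usage sketch (hypothetical caller): the usual
 * read-modify-delayed-write cycle for metadata looks like
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		(modify bp->b_data in place)
 *		bdwrite(bp);	(mark dirty and release; written later)
 *	}
 *
 * as opposed to bwrite(bp), which would push the block to the device
 * synchronously before returning.
 */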

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(BUF_REFCNT(bp) == 1, ("bdirty: bp %p not locked",bp));
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
		reassignbuf(bp);
		atomic_add_int(&numdirtybuffers, 1);
		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	KASSERT(BUF_REFCNT(bp) == 1, ("bundirty: bp %p not locked",bp));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		atomic_subtract_int(&numdirtybuffers, 1);
		numdirtywakeup(lodirtybuffers);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
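
/*
 * Sketch of how the three write flavors above relate (illustrative):
 *
 *	bwrite(bp);	synchronous; returns the I/O's error status
 *	bawrite(bp);	asynchronous; starts output, released on completion
 *	bdwrite(bp);	delayed; just marks dirty, flushed later by the
 *			buf daemon or a sync
 *
 * Code that batches many small updates typically prefers bdwrite() or
 * bawrite() and falls back to bwrite() only when an ordering or error
 * guarantee is required.
 */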

/*
 *	bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */

void
bwillwrite(void)
{

	if (numdirtybuffers >= hidirtybuffers) {
		mtx_lock(&nblock);
		while (numdirtybuffers >= hidirtybuffers) {
			bd_wakeup(1);
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			msleep(&needsbuffer, &nblock,
			    (PRIBIO + 4), "flswai", 0);
		}
		mtx_unlock(&nblock);
	}
}

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{

	return(numdirtybuffers >= hidirtybuffers);
}
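
/*
 * Illustrative use (a hedged sketch of a pattern seen in filesystem
 * code, not a call made from this file): when the dirty count is
 * severe, a caller can convert a delayed write into an immediate async
 * write to help drain the backlog:
 *
 *	if (buf_dirty_count_severe())
 *		bawrite(bp);
 *	else
 *		bdwrite(bp);
 */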

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_flags & B_MANAGED) {
		bqrelse(bp);
		return;
	}

	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If the error is anything
		 * other than an I/O error (EIO), assume that retrying
		 * is futile.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			atomic_subtract_int(&numdirtybuffers, 1);
			numdirtywakeup(lodirtybuffers);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (vm_page_count_severe()) {
		/*
		 * XXX This lock may not be necessary since BKGRDINPROG
		 * cannot be set while we hold the buf lock, it can only be
		 * cleared if it is already pending.
		 */
		if (bp->b_vp) {
			BO_LOCK(bp->b_bufobj);
			if (!(bp->b_vflags & BV_BKGRDINPROG))
				bp->b_flags |= B_RELBUF;
			BO_UNLOCK(bp->b_bufobj);
		} else
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer. If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
		 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
		 !vn_isdisk(bp->b_vp, NULL) &&
		 (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;

		obj = bp->b_bufobj->bo_object;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		VM_OBJECT_LOCK(obj);
		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
			if (m == bogus_page) {
				poff = OFF_TO_IDX(bp->b_offset);
				had_bogus = 1;

				for (j = i; j < bp->b_npages; j++) {
					vm_page_t mtmp;
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = mtmp;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				m = bp->b_pages[i];
			}
			if ((bp->b_flags & B_NOCACHE) ||
			    (bp->b_ioflags & BIO_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_lock_queues();
				vm_page_set_invalid(m, poffset, presid);
				vm_page_unlock_queues();
				if (had_bogus)
					printf("avoided corruption bug in bogus_page/brelse code\n");
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		VM_OBJECT_UNLOCK(obj);
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);
		}

	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
		if (bp->b_bufsize != 0)
			allocbuf(bp, 0);
		if (bp->b_vp != NULL)
			brelvp(bp);
	}

	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		return;
	}

	/* enqueue */
	mtx_lock(&bqlock);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
		bremfreel(bp);
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
		} else {
			bp->b_qindex = QUEUE_EMPTY;
		}
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	/* remaining buffers */
	} else {
		if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
		    (B_DELWRI|B_NEEDSGIANT))
			bp->b_qindex = QUEUE_DIRTY_GIANT;
		else if (bp->b_flags & B_DELWRI)
			bp->b_qindex = QUEUE_DIRTY;
		else
			bp->b_qindex = QUEUE_CLEAN;
		if (bp->b_flags & B_AGE)
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		else
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}
	mtx_unlock(&bqlock);

	/*
	 * If B_INVAL and B_DELWRI is set, clear B_DELWRI.  We have already
	 * placed the buffer on the correct queue.  We must also disassociate
	 * the device and vnode for a B_INVAL buffer so gbincore() doesn't
	 * find it.
	 */
	if (bp->b_flags & B_INVAL) {
		if (bp->b_flags & B_DELWRI)
			bundirty(bp);
		if (bp->b_vp)
			brelvp(bp);
	}

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if (!(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	/* unlock */
	BUF_UNLOCK(bp);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf *bp)
{
	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		return;
	}

	if (bp->b_flags & B_MANAGED) {
		if (bp->b_flags & B_REMFREE) {
			mtx_lock(&bqlock);
			bremfreel(bp);
			mtx_unlock(&bqlock);
		}
		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
		BUF_UNLOCK(bp);
		return;
	}

	mtx_lock(&bqlock);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
		bremfreel(bp);
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	/* buffers with stale but valid contents */
	if (bp->b_flags & B_DELWRI) {
		if (bp->b_flags & B_NEEDSGIANT)
			bp->b_qindex = QUEUE_DIRTY_GIANT;
		else
			bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	} else {
		/*
		 * XXX This lock may not be necessary since BKGRDINPROG
		 * cannot be set while we hold the buf lock, it can only be
		 * cleared if it is already pending.
		 */
		BO_LOCK(bp->b_bufobj);
		if (!vm_page_count_severe() || bp->b_vflags & BV_BKGRDINPROG) {
			BO_UNLOCK(bp->b_bufobj);
			bp->b_qindex = QUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
			    b_freelist);
		} else {
			/*
			 * We are too low on memory, we have to try to free
			 * the buffer (most importantly: the wired pages
			 * making up its backing store) *now*.
			 */
			BO_UNLOCK(bp->b_bufobj);
			mtx_unlock(&bqlock);
			brelse(bp);
			return;
		}
	}
	mtx_unlock(&bqlock);

	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	/* unlock */
	BUF_UNLOCK(bp);
}
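
/*
 * Illustrative rule of thumb for the two release paths (a sketch, not a
 * call made here): callers that expect to touch the data again soon
 * requeue the buffer, while callers that have invalidated it let
 * brelse() tear it down:
 *
 *	if (bp->b_flags & B_INVAL)
 *		brelse(bp);	(free resources, disassociate the vnode)
 *	else
 *		bqrelse(bp);	(keep contents cached for reuse)
 */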

/* Give pages used by the bp back to the VM system (where possible) */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	vm_page_lock_queues();
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->oflags & VPO_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (vm_page_count_severe()) {
				vm_page_try_to_cache(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_npages = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block at a particular lbn is available for a clustered
 * write.
 */
static int
vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
{
	struct buf *bpa;
	int match;

	match = 0;

	/* If the buf isn't in core skip it */
	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
		return (0);

	/* If the buf is busy we don't want to wait for it */
	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		return (0);

	/* Only cluster with valid clusterable delayed write buffers */
	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
	    (B_DELWRI | B_CLUSTEROK))
		goto done;

	if (bpa->b_bufsize != size)
		goto done;

	/*
	 * Check to see if it is in the expected place on disk and that the
	 * block has been mapped.
	 */
	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
		match = 1;
done:
	BUF_UNLOCK(bpa);
	return (match);
}

/*
 *	vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
int
vfs_bio_awrite(struct buf *bp)
{
	int i;
	int j;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int ncl;
	int nwritten;
	int size;
	int maxcl;

	/*
	 * right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		VI_LOCK(vp);
		for (i = 1; i < maxcl; i++)
			if (vfs_bio_clcheck(vp, size, lblkno + i,
			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
				break;

		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
			if (vfs_bio_clcheck(vp, size, lblkno - j,
			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
				break;

		VI_UNLOCK(vp);
		--j;
		ncl = i + j;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			BUF_UNLOCK(bp);
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
			return nwritten;
		}
	}
	bremfree(bp);
	bp->b_flags |= B_ASYNC;
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) bwrite(bp);

	return nwritten;
}
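
/*
 * Worked example (illustrative; assumes the common MAXPHYS of 128K):
 * with an f_iosize of 16K, maxcl == 8, so the scan above probes up to
 * maxcl - 1 == 7 neighboring logical blocks in each direction, and the
 * combined cluster ncl is capped at eight 16K blocks, i.e. one 128K
 * cluster_wbuild() transfer.
 */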
1682
1683/*
1684 *	getnewbuf:
1685 *
1686 *	Find and initialize a new buffer header, freeing up existing buffers
1687 *	in the bufqueues as necessary.  The new buffer is returned locked.
1688 *
1689 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1690 *	buffer away, the caller must set B_INVAL prior to calling brelse().
1691 *
1692 *	We block if:
1693 *		We have insufficient buffer headers
1694 *		We have insufficient buffer space
1695 *		buffer_map is too fragmented ( space reservation fails )
1696 *		If we have to flush dirty buffers ( but we try to avoid this )
1697 *
1698 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1699 *	Instead we ask the buf daemon to do it for us.  We attempt to
1700 *	avoid piecemeal wakeups of the pageout daemon.
1701 */
1702
1703static struct buf *
1704getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
1705{
1706	struct buf *bp;
1707	struct buf *nbp;
1708	int defrag = 0;
1709	int nqindex;
1710	static int flushingbufs;
1711
1712	/*
1713	 * We can't afford to block since we might be holding a vnode lock,
1714	 * which may prevent system daemons from running.  We deal with
1715	 * low-memory situations by proactively returning memory and running
1716	 * async I/O rather then sync I/O.
1717	 */
1718
1719	atomic_add_int(&getnewbufcalls, 1);
1720	atomic_subtract_int(&getnewbufrestarts, 1);
1721restart:
1722	atomic_add_int(&getnewbufrestarts, 1);
1723
1724	/*
1725	 * Setup for scan.  If we do not have enough free buffers,
1726	 * we setup a degenerate case that immediately fails.  Note
1727	 * that if we are specially marked process, we are allowed to
1728	 * dip into our reserves.
1729	 *
1730	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
1731	 *
1732	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
1733	 * However, there are a number of cases (defragging, reusing, ...)
1734	 * where we cannot backup.
1735	 */
1736	mtx_lock(&bqlock);
1737	nqindex = QUEUE_EMPTYKVA;
1738	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1739
1740	if (nbp == NULL) {
1741		/*
1742		 * If no EMPTYKVA buffers and we are either
1743		 * defragging or reusing, locate a CLEAN buffer
1744		 * to free or reuse.  If bufspace useage is low
1745		 * skip this step so we can allocate a new buffer.
1746		 */
1747		if (defrag || bufspace >= lobufspace) {
1748			nqindex = QUEUE_CLEAN;
1749			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1750		}
1751
1752		/*
1753		 * If we could not find or were not allowed to reuse a
1754		 * CLEAN buffer, check to see if it is ok to use an EMPTY
1755		 * buffer.  We can only use an EMPTY buffer if allocating
1756		 * its KVA would not otherwise run us out of buffer space.
1757		 */
1758		if (nbp == NULL && defrag == 0 &&
1759		    bufspace + maxsize < hibufspace) {
1760			nqindex = QUEUE_EMPTY;
1761			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1762		}
1763	}
1764
1765	/*
1766	 * Run scan, possibly freeing data and/or kva mappings on the fly
1767	 * depending.
1768	 */
1769
1770	while ((bp = nbp) != NULL) {
1771		int qindex = nqindex;
1772
1773		/*
1774		 * Calculate next bp ( we can only use it if we do not block
1775		 * or do other fancy things ).
1776		 */
1777		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1778			switch(qindex) {
1779			case QUEUE_EMPTY:
1780				nqindex = QUEUE_EMPTYKVA;
1781				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1782					break;
1783				/* FALLTHROUGH */
1784			case QUEUE_EMPTYKVA:
1785				nqindex = QUEUE_CLEAN;
1786				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1787					break;
1788				/* FALLTHROUGH */
1789			case QUEUE_CLEAN:
1790				/*
1791				 * nbp is NULL.
1792				 */
1793				break;
1794			}
1795		}
1796		/*
1797		 * If we are defragging then we need a buffer with
1798		 * b_kvasize != 0.  XXX this situation should no longer
1799		 * occur, if defrag is non-zero the buffer's b_kvasize
1800		 * should also be non-zero at this point.  XXX
1801		 */
1802		if (defrag && bp->b_kvasize == 0) {
1803			printf("Warning: defrag empty buffer %p\n", bp);
1804			continue;
1805		}
1806
1807		/*
1808		 * Start freeing the bp.  This is somewhat involved.  nbp
1809		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1810		 */
1811		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1812			continue;
1813		if (bp->b_vp) {
1814			BO_LOCK(bp->b_bufobj);
1815			if (bp->b_vflags & BV_BKGRDINPROG) {
1816				BO_UNLOCK(bp->b_bufobj);
1817				BUF_UNLOCK(bp);
1818				continue;
1819			}
1820			BO_UNLOCK(bp->b_bufobj);
1821		}
1822		CTR6(KTR_BUF,
1823		    "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
1824		    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
1825		    bp->b_kvasize, bp->b_bufsize, qindex);
1826
1827		/*
1828		 * Sanity Checks
1829		 */
1830		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1831
1832		/*
1833		 * Note: we no longer distinguish between VMIO and non-VMIO
1834		 * buffers.
1835		 */
1836
1837		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1838
1839		bremfreel(bp);
1840		mtx_unlock(&bqlock);
1841
1842		if (qindex == QUEUE_CLEAN) {
1843			if (bp->b_flags & B_VMIO) {
1844				bp->b_flags &= ~B_ASYNC;
1845				vfs_vmio_release(bp);
1846			}
1847			if (bp->b_vp)
1848				brelvp(bp);
1849		}
1850
1851		/*
1852		 * NOTE:  nbp is now entirely invalid.  We can only restart
1853		 * the scan from this point on.
1854		 *
1855		 * Get the rest of the buffer freed up.  b_kva* is still
1856		 * valid after this operation.
1857		 */
1858
1859		if (bp->b_rcred != NOCRED) {
1860			crfree(bp->b_rcred);
1861			bp->b_rcred = NOCRED;
1862		}
1863		if (bp->b_wcred != NOCRED) {
1864			crfree(bp->b_wcred);
1865			bp->b_wcred = NOCRED;
1866		}
1867		if (!LIST_EMPTY(&bp->b_dep))
1868			buf_deallocate(bp);
1869		if (bp->b_vflags & BV_BKGRDINPROG)
1870			panic("losing buffer 3");
1871		KASSERT(bp->b_vp == NULL,
1872		    ("bp: %p still has vnode %p.  qindex: %d",
1873		    bp, bp->b_vp, qindex));
1874		KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1875		   ("bp: %p still on a buffer list. xflags %X",
1876		    bp, bp->b_xflags));
1877
1878		if (bp->b_bufsize)
1879			allocbuf(bp, 0);
1880
1881		bp->b_flags = 0;
1882		bp->b_ioflags = 0;
1883		bp->b_xflags = 0;
1884		bp->b_vflags = 0;
1885		bp->b_vp = NULL;
1886		bp->b_blkno = bp->b_lblkno = 0;
1887		bp->b_offset = NOOFFSET;
1888		bp->b_iodone = 0;
1889		bp->b_error = 0;
1890		bp->b_resid = 0;
1891		bp->b_bcount = 0;
1892		bp->b_npages = 0;
1893		bp->b_dirtyoff = bp->b_dirtyend = 0;
1894		bp->b_bufobj = NULL;
1895		bp->b_pin_count = 0;
1896		bp->b_fsprivate1 = NULL;
1897		bp->b_fsprivate2 = NULL;
1898		bp->b_fsprivate3 = NULL;
1899
1900		LIST_INIT(&bp->b_dep);
1901
1902		/*
1903		 * If we are defragging then free the buffer.
1904		 */
1905		if (defrag) {
1906			bp->b_flags |= B_INVAL;
1907			bfreekva(bp);
1908			brelse(bp);
1909			defrag = 0;
1910			goto restart;
1911		}
1912
1913		/*
1914		 * Notify any waiters for the buffer lock about
1915		 * identity change by freeing the buffer.
1916		 */
1917		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp) > 0) {
1918			bp->b_flags |= B_INVAL;
1919			bfreekva(bp);
1920			brelse(bp);
1921			goto restart;
1922		}
1923
1924		/*
1925		 * If we are overcommitted then recover the buffer and its
1926		 * KVM space.  This occurs in rare situations when multiple
1927		 * processes are blocked in getnewbuf() or allocbuf().
1928		 */
1929		if (bufspace >= hibufspace)
1930			flushingbufs = 1;
1931		if (flushingbufs && bp->b_kvasize != 0) {
1932			bp->b_flags |= B_INVAL;
1933			bfreekva(bp);
1934			brelse(bp);
1935			goto restart;
1936		}
1937		if (bufspace < lobufspace)
1938			flushingbufs = 0;
1939		break;
1940	}
1941
1942	/*
1943	 * If we exhausted our list, sleep as appropriate.  We may have to
1944	 * wakeup various daemons and write out some dirty buffers.
1945	 *
1946	 * Generally we are sleeping due to insufficient buffer space.
1947	 */
1948
1949	if (bp == NULL) {
1950		int flags;
1951		char *waitmsg;
1952
1953		if (defrag) {
1954			flags = VFS_BIO_NEED_BUFSPACE;
1955			waitmsg = "nbufkv";
1956		} else if (bufspace >= hibufspace) {
1957			waitmsg = "nbufbs";
1958			flags = VFS_BIO_NEED_BUFSPACE;
1959		} else {
1960			waitmsg = "newbuf";
1961			flags = VFS_BIO_NEED_ANY;
1962		}
1963		mtx_lock(&nblock);
1964		needsbuffer |= flags;
1965		mtx_unlock(&nblock);
1966		mtx_unlock(&bqlock);
1967
1968		bd_speedup();	/* heeeelp */
1969
1970		mtx_lock(&nblock);
1971		while (needsbuffer & flags) {
1972			if (msleep(&needsbuffer, &nblock,
1973			    (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
1974				mtx_unlock(&nblock);
1975				return (NULL);
1976			}
1977		}
1978		mtx_unlock(&nblock);
1979	} else {
1980		/*
1981		 * We finally have a valid bp.  We aren't quite out of the
1982		 * woods, we still have to reserve kva space.  In order
1983		 * to keep fragmentation sane we only allocate kva in
1984		 * BKVASIZE chunks.
1985		 */
1986		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
1987
1988		if (maxsize != bp->b_kvasize) {
1989			vm_offset_t addr = 0;
1990
1991			bfreekva(bp);
1992
1993			vm_map_lock(buffer_map);
1994			if (vm_map_findspace(buffer_map,
1995				vm_map_min(buffer_map), maxsize, &addr)) {
1996				/*
1997				 * Uh oh.  Buffer map is too fragmented.  We
1998				 * must defragment the map.
1999				 */
2000				atomic_add_int(&bufdefragcnt, 1);
2001				vm_map_unlock(buffer_map);
2002				defrag = 1;
2003				bp->b_flags |= B_INVAL;
2004				brelse(bp);
2005				goto restart;
2006			}
2007			if (addr) {
2008				vm_map_insert(buffer_map, NULL, 0,
2009					addr, addr + maxsize,
2010					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
2011
2012				bp->b_kvabase = (caddr_t) addr;
2013				bp->b_kvasize = maxsize;
2014				atomic_add_int(&bufspace, bp->b_kvasize);
2015				atomic_add_int(&bufreusecnt, 1);
2016			}
2017			vm_map_unlock(buffer_map);
2018		}
2019		bp->b_saveaddr = bp->b_kvabase;
2020		bp->b_data = bp->b_saveaddr;
2021	}
2022	return (bp);
2023}
2024
2025/*
2026 *	buf_daemon:
2027 *
2028 *	buffer flushing daemon.  Buffers are normally flushed by the
2029 *	update daemon but if it cannot keep up this process starts to
2030 *	take the load in an attempt to prevent getnewbuf() from blocking.
2031 */
2032
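/*
 * Illustrative producer-side sketch (hypothetical): code that notices
 * dirty-buffer pressure pokes the daemon instead of flushing in line,
 * using the same request/wakeup protocol the loop below consumes:
 *
 *	mtx_lock(&bdlock);
 *	bd_request = 1;
 *	wakeup(&bd_request);
 *	mtx_unlock(&bdlock);
 */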
2033static struct kproc_desc buf_kp = {
2034	"bufdaemon",
2035	buf_daemon,
2036	&bufdaemonproc
2037};
2038SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
2039
2040static void
2041buf_daemon(void)
2042{
2043
2044	/*
2045	 * This process needs to be suspended prior to shutdown sync.
2046	 */
2047	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2048	    SHUTDOWN_PRI_LAST);
2049
2050	/*
2051	 * This process is allowed to take the buffer cache to the limit
2052	 */
2053	curthread->td_pflags |= TDP_NORUNNINGBUF;
2054	mtx_lock(&bdlock);
2055	for (;;) {
2056		bd_request = 0;
2057		mtx_unlock(&bdlock);
2058
2059		kproc_suspend_check(bufdaemonproc);
2060
2061		/*
2062		 * Do the flush.  Limit the amount of in-transit I/O we
2063		 * allow to build up, otherwise we would completely saturate
2064		 * the I/O system.  Wakeup any waiting processes before we
2065		 * normally would so they can run in parallel with our drain.
2066		 */
2067		while (numdirtybuffers > lodirtybuffers) {
2068			int flushed;
2069
2070			flushed = flushbufqueues(QUEUE_DIRTY, 0);
2071			/* The list empty check here is slightly racy */
2072			if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
2073				mtx_lock(&Giant);
2074				flushed += flushbufqueues(QUEUE_DIRTY_GIANT, 0);
2075				mtx_unlock(&Giant);
2076			}
2077			if (flushed == 0) {
2078				/*
2079				 * Could not find any buffers without rollback
2080				 * dependencies, so just write the first one
2081				 * in the hopes of eventually making progress.
2082				 */
2083				flushbufqueues(QUEUE_DIRTY, 1);
2084				if (!TAILQ_EMPTY(
2085				    &bufqueues[QUEUE_DIRTY_GIANT])) {
2086					mtx_lock(&Giant);
2087					flushbufqueues(QUEUE_DIRTY_GIANT, 1);
2088					mtx_unlock(&Giant);
2089				}
2090				break;
2091			}
2092			uio_yield();
2093		}
2094
2095		/*
2096		 * Only clear bd_request if we have reached our low water
2097		 * mark.  The buf_daemon normally waits 1 second and
2098		 * then incrementally flushes any dirty buffers that have
2099		 * built up, within reason.
2100		 *
2101		 * If we were unable to hit our low water mark and couldn't
2102		 * find any flushable buffers, we sleep half a second.
2103		 * Otherwise we loop immediately.
2104		 */
2105		mtx_lock(&bdlock);
2106		if (numdirtybuffers <= lodirtybuffers) {
2107			/*
2108			 * We reached our low water mark, reset the
2109			 * request and sleep until we are needed again.
2110			 * The sleep is just so the suspend code works.
2111			 */
2112			bd_request = 0;
2113			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2114		} else {
2115			/*
2116			 * We couldn't find any flushable dirty buffers but
2117			 * still have too many dirty buffers, so we
2118			 * have to sleep and try again.  (rare)
2119			 */
2120			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2121		}
2122	}
2123}
2124
2125/*
2126 *	flushbufqueues:
2127 *
2128 *	Try to flush a buffer in the dirty queue.  We must be careful to
2129 *	free up B_INVAL buffers instead of writing them, something NFS is
2130 *	particularly sensitive to.
2131 */
2132static int flushwithdeps = 0;
2133SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2134    0, "Number of buffers flushed with dependencies that require rollbacks");
2135
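/*
 * Scanning note (illustrative sketch of the technique used below): a
 * stack-local sentinel buf is queued at the tail so the loop can
 * rotate entries and drop bqlock mid-scan, yet still detect when it
 * has visited every entry once:
 *
 *	TAILQ_INSERT_TAIL(&bufqueues[queue], &sentinel, b_freelist);
 *	for (;;) {
 *		bp = TAILQ_FIRST(&bufqueues[queue]);
 *		if (bp == &sentinel)
 *			break;		(wrapped the whole queue)
 *		...rotate bp to the tail, try to flush it...
 *	}
 *	TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist);
 */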
2136static int
2137flushbufqueues(int queue, int flushdeps)
2138{
2139	struct buf sentinel;
2140	struct vnode *vp;
2141	struct mount *mp;
2142	struct buf *bp;
2143	int hasdeps;
2144	int flushed;
2145	int target;
2146
2147	target = numdirtybuffers - lodirtybuffers;
2148	if (flushdeps && target > 2)
2149		target /= 2;
2150	flushed = 0;
2151	bp = NULL;
2152	mtx_lock(&bqlock);
2153	TAILQ_INSERT_TAIL(&bufqueues[queue], &sentinel, b_freelist);
2154	while (flushed != target) {
2155		bp = TAILQ_FIRST(&bufqueues[queue]);
2156		if (bp == &sentinel)
2157			break;
2158		TAILQ_REMOVE(&bufqueues[queue], bp, b_freelist);
2159		TAILQ_INSERT_TAIL(&bufqueues[queue], bp, b_freelist);
2160
2161		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2162			continue;
2163		if (bp->b_pin_count > 0) {
2164			BUF_UNLOCK(bp);
2165			continue;
2166		}
2167		BO_LOCK(bp->b_bufobj);
2168		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2169		    (bp->b_flags & B_DELWRI) == 0) {
2170			BO_UNLOCK(bp->b_bufobj);
2171			BUF_UNLOCK(bp);
2172			continue;
2173		}
2174		BO_UNLOCK(bp->b_bufobj);
2175		if (bp->b_flags & B_INVAL) {
2176			bremfreel(bp);
2177			mtx_unlock(&bqlock);
2178			brelse(bp);
2179			flushed++;
2180			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2181			mtx_lock(&bqlock);
2182			continue;
2183		}
2184
2185		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2186			if (flushdeps == 0) {
2187				BUF_UNLOCK(bp);
2188				continue;
2189			}
2190			hasdeps = 1;
2191		} else
2192			hasdeps = 0;
2193		/*
2194		 * We must hold the lock on a vnode before writing
2195		 * one of its buffers. Otherwise we may confuse, or
2196		 * in the case of a snapshot vnode, deadlock the
2197		 * system.
2198		 *
2199		 * The lock order here is the reverse of the normal
2200		 * order of vnode lock followed by buf lock.  This is
2201		 * ok because the NOWAIT will prevent deadlock.
2202		 */
2203		vp = bp->b_vp;
2204		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2205			BUF_UNLOCK(bp);
2206			continue;
2207		}
2208		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
2209			mtx_unlock(&bqlock);
2210			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2211			    bp, bp->b_vp, bp->b_flags);
2212			vfs_bio_awrite(bp);
2213			vn_finished_write(mp);
2214			VOP_UNLOCK(vp, 0);
2215			flushwithdeps += hasdeps;
2216			flushed++;
2217			waitrunningbufspace();
2218			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2219			mtx_lock(&bqlock);
2220			continue;
2221		}
2222		vn_finished_write(mp);
2223		BUF_UNLOCK(bp);
2224	}
2225	TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist);
2226	mtx_unlock(&bqlock);
2227	return (flushed);
2228}
2229
2230/*
2231 * Check to see if a block is currently memory resident.
2232 */
2233struct buf *
2234incore(struct bufobj *bo, daddr_t blkno)
2235{
2236	struct buf *bp;
2237
2238	BO_LOCK(bo);
2239	bp = gbincore(bo, blkno);
2240	BO_UNLOCK(bo);
2241	return (bp);
2242}
2243
2244/*
2245 * Returns true if no I/O is needed to access the
2246 * associated VM object.  This is like incore except
2247 * it also hunts around in the VM system for the data.
2248 */
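/*
 * Worked example (illustrative): with f_iosize == 8192, PAGE_SIZE ==
 * 4096 and a page-aligned block, the loop below tests the block in two
 * page-sized chunks (tinc == 4096 twice), clamping tinc whenever a
 * chunk would otherwise cross a page boundary.
 */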
2249
2250static int
2251inmem(struct vnode * vp, daddr_t blkno)
2252{
2253	vm_object_t obj;
2254	vm_offset_t toff, tinc, size;
2255	vm_page_t m;
2256	vm_ooffset_t off;
2257
2258	ASSERT_VOP_LOCKED(vp, "inmem");
2259
2260	if (incore(&vp->v_bufobj, blkno))
2261		return 1;
2262	if (vp->v_mount == NULL)
2263		return 0;
2264	obj = vp->v_object;
2265	if (obj == NULL)
2266		return (0);
2267
2268	size = PAGE_SIZE;
2269	if (size > vp->v_mount->mnt_stat.f_iosize)
2270		size = vp->v_mount->mnt_stat.f_iosize;
2271	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2272
2273	VM_OBJECT_LOCK(obj);
2274	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2275		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2276		if (!m)
2277			goto notinmem;
2278		tinc = size;
2279		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2280			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2281		if (vm_page_is_valid(m,
2282		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2283			goto notinmem;
2284	}
2285	VM_OBJECT_UNLOCK(obj);
2286	return 1;
2287
2288notinmem:
2289	VM_OBJECT_UNLOCK(obj);
2290	return (0);
2291}
2292
2293/*
2294 *	vfs_setdirty:
2295 *
2296 *	Sets the dirty range for a buffer based on the status of the dirty
2297 *	bits in the pages comprising the buffer.
2298 *
2299 *	The range is limited to the size of the buffer.
2300 *
2301 *	This routine is primarily used by NFS, but is generalized for the
2302 *	B_VMIO case.
2303 */
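/*
 * Worked example (illustrative, PAGE_SIZE == 4096, page-aligned
 * b_offset): if only the second page of a two-page buffer tests dirty,
 * the scan in vfs_setdirty_locked_object() below computes boffset ==
 * 4096 and eoffset == 8192, and [b_dirtyoff, b_dirtyend) is widened to
 * cover that range, clipped to b_bcount.
 */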
2304static void
2305vfs_setdirty(struct buf *bp)
2306{
2307
2308	/*
2309	 * Degenerate case - empty buffer
2310	 */
2311
2312	if (bp->b_bufsize == 0)
2313		return;
2314
2315	/*
2316	 * We qualify the scan for modified pages on whether the
2317	 * object has been flushed yet.
2318	 */
2319
2320	if ((bp->b_flags & B_VMIO) == 0)
2321		return;
2322
2323	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2324	vfs_setdirty_locked_object(bp);
2325	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2326}
2327
2328static void
2329vfs_setdirty_locked_object(struct buf *bp)
2330{
2331	vm_object_t object;
2332	int i;
2333
2334	object = bp->b_bufobj->bo_object;
2335	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2336	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
2337		vm_offset_t boffset;
2338		vm_offset_t eoffset;
2339
2340		vm_page_lock_queues();
2341		/*
2342		 * test the pages to see if they have been modified directly
2343		 * by users through the VM system.
2344		 */
2345		for (i = 0; i < bp->b_npages; i++)
2346			vm_page_test_dirty(bp->b_pages[i]);
2347
2348		/*
2349		 * Calculate the encompassing dirty range, boffset and eoffset,
2350		 * (eoffset - boffset) bytes.
2351		 */
2352
2353		for (i = 0; i < bp->b_npages; i++) {
2354			if (bp->b_pages[i]->dirty)
2355				break;
2356		}
2357		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2358
2359		for (i = bp->b_npages - 1; i >= 0; --i) {
2360			if (bp->b_pages[i]->dirty) {
2361				break;
2362			}
2363		}
2364		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2365
2366		vm_page_unlock_queues();
2367		/*
2368		 * Fit it to the buffer.
2369		 */
2370
2371		if (eoffset > bp->b_bcount)
2372			eoffset = bp->b_bcount;
2373
2374		/*
2375		 * If we have a good dirty range, merge with the existing
2376		 * dirty range.
2377		 */
2378
2379		if (boffset < eoffset) {
2380			if (bp->b_dirtyoff > boffset)
2381				bp->b_dirtyoff = boffset;
2382			if (bp->b_dirtyend < eoffset)
2383				bp->b_dirtyend = eoffset;
2384		}
2385	}
2386}
2387
2388/*
2389 *	getblk:
2390 *
2391 *	Get a block given a specified block and offset into a file/device.
2392 *	The buffer's B_DONE bit will be cleared on return, making it almost
2393 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2394 *	return.  The caller should clear B_INVAL prior to initiating a
2395 *	READ.
2396 *
2397 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2398 *	an existing buffer.
2399 *
2400 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2401 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2402 *	and then cleared based on the backing VM.  If the previous buffer is
2403 *	non-0-sized but invalid, B_CACHE will be cleared.
2404 *
2405 *	If getblk() must create a new buffer, the new buffer is returned with
2406 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2407 *	case it is returned with B_INVAL clear and B_CACHE set based on the
2408 *	backing VM.
2409 *
2410 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2411 *	B_CACHE bit is clear.
2412 *
2413 *	What this means, basically, is that the caller should use B_CACHE to
2414 *	determine whether the buffer is fully valid or not and should clear
2415 *	B_INVAL prior to issuing a read.  If the caller intends to validate
2416 *	the buffer by loading its data area with something, the caller needs
2417 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2418 *	the caller should set B_CACHE ( as an optimization ), else the caller
2419 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2420 *	a write attempt or if it was a successful read.  If the caller
2421 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2422 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2423 */
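/*
 * Illustrative read pattern (hypothetical caller; the bread()/breadn()
 * helpers elsewhere in this file follow the same contract):
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		...initiate the read, then bufwait(bp)...
 *	}
 */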
2424struct buf *
2425getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
2426    int flags)
2427{
2428	struct buf *bp;
2429	struct bufobj *bo;
2430	int error;
2431
2432	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
2433	ASSERT_VOP_LOCKED(vp, "getblk");
2434	if (size > MAXBSIZE)
2435		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2436
2437	bo = &vp->v_bufobj;
2438loop:
2439	/*
2440	 * Block if we are low on buffers.  Certain processes are allowed
2441	 * to completely exhaust the buffer cache.
2442	 *
2443	 * If this check ever becomes a bottleneck it may be better to
2444	 * move it into the else, when gbincore() fails.  At the moment
2445	 * it isn't a problem.
2446	 *
2447	 * XXX remove if 0 sections (clean this up after it's proven)
2448	 */
2449	if (numfreebuffers == 0) {
2450		if (TD_IS_IDLETHREAD(curthread))
2451			return NULL;
2452		mtx_lock(&nblock);
2453		needsbuffer |= VFS_BIO_NEED_ANY;
2454		mtx_unlock(&nblock);
2455	}
2456
2457	BO_LOCK(bo);
2458	bp = gbincore(bo, blkno);
2459	if (bp != NULL) {
2460		int lockflags;
2461		/*
2462		 * Buffer is in-core.  If the buffer is not busy, it must
2463		 * be on a queue.
2464		 */
2465		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
2466
2467		if (flags & GB_LOCK_NOWAIT)
2468			lockflags |= LK_NOWAIT;
2469
2470		error = BUF_TIMELOCK(bp, lockflags,
2471		    VI_MTX(vp), "getblk", slpflag, slptimeo);
2472
2473		/*
2474		 * If we slept and got the lock we have to restart in case
2475		 * the buffer changed identities.
2476		 */
2477		if (error == ENOLCK)
2478			goto loop;
2479		/* We timed out or were interrupted. */
2480		else if (error)
2481			return (NULL);
2482
2483		/*
2484		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2485		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2486		 * and for a VMIO buffer B_CACHE is adjusted according to the
2487		 * backing VM cache.
2488		 */
2489		if (bp->b_flags & B_INVAL)
2490			bp->b_flags &= ~B_CACHE;
2491		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2492			bp->b_flags |= B_CACHE;
2493		bremfree(bp);
2494
2495		/*
2496		 * check for size inconsistencies for the non-VMIO case.
2497		 */
2498
2499		if (bp->b_bcount != size) {
2500			if ((bp->b_flags & B_VMIO) == 0 ||
2501			    (size > bp->b_kvasize)) {
2502				if (bp->b_flags & B_DELWRI) {
2503					/*
2504					 * If the buffer is pinned and the caller
2505					 * does not want to sleep waiting for it
2506					 * to be unpinned, bail out.
2507					 */
2508					if (bp->b_pin_count > 0) {
2509						if (flags & GB_LOCK_NOWAIT) {
2510							bqrelse(bp);
2511							return (NULL);
2512						} else {
2513							bunpin_wait(bp);
2514						}
2515					}
2516					bp->b_flags |= B_NOCACHE;
2517					bwrite(bp);
2518				} else {
2519					if (LIST_EMPTY(&bp->b_dep)) {
2520						bp->b_flags |= B_RELBUF;
2521						brelse(bp);
2522					} else {
2523						bp->b_flags |= B_NOCACHE;
2524						bwrite(bp);
2525					}
2526				}
2527				goto loop;
2528			}
2529		}
2530
2531		/*
2532		 * If the size is inconsistent in the VMIO case, we can resize
2533		 * the buffer.  This might lead to B_CACHE getting set or
2534		 * cleared.  If the size has not changed, B_CACHE remains
2535		 * unchanged from its previous state.
2536		 */
2537
2538		if (bp->b_bcount != size)
2539			allocbuf(bp, size);
2540
2541		KASSERT(bp->b_offset != NOOFFSET,
2542		    ("getblk: no buffer offset"));
2543
2544		/*
2545		 * A buffer with B_DELWRI set and B_CACHE clear must
2546		 * be committed before we can return the buffer in
2547		 * order to prevent the caller from issuing a read
2548		 * ( due to B_CACHE not being set ) and overwriting
2549		 * it.
2550		 *
2551		 * Most callers, including NFS and FFS, need this to
2552		 * operate properly either because they assume they
2553		 * can issue a read if B_CACHE is not set, or because
2554		 * ( for example ) an uncached B_DELWRI might loop due
2555		 * to softupdates re-dirtying the buffer.  In the latter
2556		 * case, B_CACHE is set after the first write completes,
2557		 * preventing further loops.
2558		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2559		 * above while extending the buffer, we cannot allow the
2560		 * buffer to remain with B_CACHE set after the write
2561		 * completes or it will represent a corrupt state.  To
2562		 * deal with this we set B_NOCACHE to scrap the buffer
2563		 * after the write.
2564		 *
2565		 * We might be able to do something fancy, like setting
2566		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2567		 * so the below call doesn't set B_CACHE, but that gets real
2568		 * confusing.  This is much easier.
2569		 */
2570
2571		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2572			bp->b_flags |= B_NOCACHE;
2573			bwrite(bp);
2574			goto loop;
2575		}
2576		bp->b_flags &= ~B_DONE;
2577	} else {
2578		int bsize, maxsize, vmio;
2579		off_t offset;
2580
2581		/*
2582		 * Buffer is not in-core, create new buffer.  The buffer
2583		 * returned by getnewbuf() is locked.  Note that the returned
2584		 * buffer is also considered valid (not marked B_INVAL).
2585		 */
2586		BO_UNLOCK(bo);
2587		/*
2588		 * If the user does not want us to create the buffer, bail out
2589		 * here.
2590		 */
2591		if (flags & GB_NOCREAT)
2592			return NULL;
2593		bsize = bo->bo_bsize;
2594		offset = blkno * bsize;
2595		vmio = vp->v_object != NULL;
2596		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2597		maxsize = imax(maxsize, bsize);
2598
2599		bp = getnewbuf(slpflag, slptimeo, size, maxsize);
2600		if (bp == NULL) {
2601			if (slpflag || slptimeo)
2602				return NULL;
2603			goto loop;
2604		}
2605
2606		/*
2607		 * This code is used to make sure that a buffer is not
2608		 * created while the getnewbuf routine is blocked.
2609		 * This can be a problem whether the vnode is locked or not.
2610		 * If the buffer is created out from under us, we have to
2611		 * throw away the one we just created.
2612		 *
2613		 * Note: this must occur before we associate the buffer
2614		 * with the vp especially considering limitations in
2615		 * the splay tree implementation when dealing with duplicate
2616		 * lblkno's.
2617		 */
2618		BO_LOCK(bo);
2619		if (gbincore(bo, blkno)) {
2620			BO_UNLOCK(bo);
2621			bp->b_flags |= B_INVAL;
2622			brelse(bp);
2623			goto loop;
2624		}
2625
2626		/*
2627		 * Insert the buffer into the hash, so that it can
2628		 * be found by incore.
2629		 */
2630		bp->b_blkno = bp->b_lblkno = blkno;
2631		bp->b_offset = offset;
2632		bgetvp(vp, bp);
2633		BO_UNLOCK(bo);
2634
2635		/*
2636		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2637		 * buffer size starts out as 0, B_CACHE will be set by
2638		 * allocbuf() for the VMIO case prior to it testing the
2639		 * backing store for validity.
2640		 */
2641
2642		if (vmio) {
2643			bp->b_flags |= B_VMIO;
2644#if defined(VFS_BIO_DEBUG)
2645			if (vn_canvmio(vp) != TRUE)
2646				printf("getblk: VMIO on vnode type %d\n",
2647					vp->v_type);
2648#endif
2649			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
2650			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
2651			    bp, vp->v_object, bp->b_bufobj->bo_object));
2652		} else {
2653			bp->b_flags &= ~B_VMIO;
2654			KASSERT(bp->b_bufobj->bo_object == NULL,
2655			    ("ARGH! has b_bufobj->bo_object %p %p\n",
2656			    bp, bp->b_bufobj->bo_object));
2657		}
2658
2659		allocbuf(bp, size);
2660		bp->b_flags &= ~B_DONE;
2661	}
2662	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
2663	KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
2664	KASSERT(bp->b_bufobj == bo,
2665	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2666	return (bp);
2667}
2668
2669/*
2670 * Get an empty, disassociated buffer of given size.  The buffer is initially
2671 * set to B_INVAL.
2672 */
2673struct buf *
2674geteblk(int size)
2675{
2676	struct buf *bp;
2677	int maxsize;
2678
2679	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2680	while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL)
2681		continue;
2682	allocbuf(bp, size);
2683	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2684	KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
2685	return (bp);
2686}
2687
2688
2689/*
2690 * This code constitutes the buffer memory from either anonymous system
2691 * memory (in the case of non-VMIO operations) or from an associated
2692 * VM object (in the case of VMIO operations).  This code is able to
2693 * resize a buffer up or down.
2694 *
2695 * Note that this code is tricky, and has many complications to resolve
2696 * deadlock or inconsistent data situations.  Tread lightly!!!
2697 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2698 * the caller.  Calling this code willy-nilly can result in the loss of data.
2699 *
2700 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2701 * B_CACHE for the non-VMIO case.
2702 */
2703
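/*
 * Illustrative use (hypothetical): resizing a locked buffer in place,
 * exactly as getblk() does above when a cached buffer's b_bcount does
 * not match the requested size:
 *
 *	(bp must be locked/busy)
 *	if (bp->b_bcount != size)
 *		allocbuf(bp, size);
 */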
2704int
2705allocbuf(struct buf *bp, int size)
2706{
2707	int newbsize, mbsize;
2708	int i;
2709
2710	if (BUF_REFCNT(bp) == 0)
2711		panic("allocbuf: buffer not busy");
2712
2713	if (bp->b_kvasize < size)
2714		panic("allocbuf: buffer too small");
2715
2716	if ((bp->b_flags & B_VMIO) == 0) {
2717		caddr_t origbuf;
2718		int origbufsize;
2719		/*
2720		 * Just get anonymous memory from the kernel.  Don't
2721		 * mess with B_CACHE.
2722		 */
2723		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2724		if (bp->b_flags & B_MALLOC)
2725			newbsize = mbsize;
2726		else
2727			newbsize = round_page(size);
2728
2729		if (newbsize < bp->b_bufsize) {
2730			/*
2731			 * malloced buffers are not shrunk
2732			 */
2733			if (bp->b_flags & B_MALLOC) {
2734				if (newbsize) {
2735					bp->b_bcount = size;
2736				} else {
2737					free(bp->b_data, M_BIOBUF);
2738					if (bp->b_bufsize) {
2739						atomic_subtract_int(
2740						    &bufmallocspace,
2741						    bp->b_bufsize);
2742						bufspacewakeup();
2743						bp->b_bufsize = 0;
2744					}
2745					bp->b_saveaddr = bp->b_kvabase;
2746					bp->b_data = bp->b_saveaddr;
2747					bp->b_bcount = 0;
2748					bp->b_flags &= ~B_MALLOC;
2749				}
2750				return 1;
2751			}
2752			vm_hold_free_pages(
2753			    bp,
2754			    (vm_offset_t) bp->b_data + newbsize,
2755			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2756		} else if (newbsize > bp->b_bufsize) {
2757			 * We only use malloced memory on the first allocation,
2758			 * We only use malloced memory on the first allocation.
2759			 * and revert to page-allocated memory when the buffer
2760			 * grows.
2761			 */
2762			/*
2763			 * There is a potential smp race here that could lead
2764			 * to bufmallocspace slightly passing the max.  It
2765			 * is probably extremely rare and not worth worrying
2766			 * over.
2767			 */
2768			if (bufmallocspace < maxbufmallocspace &&
2769			    bp->b_bufsize == 0 &&
2770			    mbsize <= PAGE_SIZE / 2) {
2771
2772				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2773				bp->b_bufsize = mbsize;
2774				bp->b_bcount = size;
2775				bp->b_flags |= B_MALLOC;
2776				atomic_add_int(&bufmallocspace, mbsize);
2777				return 1;
2778			}
2779			origbuf = NULL;
2780			origbufsize = 0;
2781			/*
2782			 * If the buffer is growing on an allocation other than
2783			 * its first, revert to the page-allocation scheme.
2784			 */
2785			if (bp->b_flags & B_MALLOC) {
2786				origbuf = bp->b_data;
2787				origbufsize = bp->b_bufsize;
2788				bp->b_data = bp->b_kvabase;
2789				if (bp->b_bufsize) {
2790					atomic_subtract_int(&bufmallocspace,
2791					    bp->b_bufsize);
2792					bufspacewakeup();
2793					bp->b_bufsize = 0;
2794				}
2795				bp->b_flags &= ~B_MALLOC;
2796				newbsize = round_page(newbsize);
2797			}
2798			vm_hold_load_pages(
2799			    bp,
2800			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2801			    (vm_offset_t) bp->b_data + newbsize);
2802			if (origbuf) {
2803				bcopy(origbuf, bp->b_data, origbufsize);
2804				free(origbuf, M_BIOBUF);
2805			}
2806		}
2807	} else {
2808		int desiredpages;
2809
2810		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2811		desiredpages = (size == 0) ? 0 :
2812			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2813
2814		if (bp->b_flags & B_MALLOC)
2815			panic("allocbuf: VMIO buffer can't be malloced");
2816		/*
2817		 * Set B_CACHE initially if buffer is 0 length or will become
2818		 * 0-length.
2819		 */
2820		if (size == 0 || bp->b_bufsize == 0)
2821			bp->b_flags |= B_CACHE;
2822
2823		if (newbsize < bp->b_bufsize) {
2824			/*
2825			 * DEV_BSIZE aligned new buffer size is less than the
2826			 * DEV_BSIZE aligned existing buffer size.  Figure out
2827			 * if we have to remove any pages.
2828			 */
2829			if (desiredpages < bp->b_npages) {
2830				vm_page_t m;
2831
2832				VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2833				vm_page_lock_queues();
2834				for (i = desiredpages; i < bp->b_npages; i++) {
2835					/*
2836					 * the page is not freed here -- it
2837					 * is the responsibility of
2838					 * vnode_pager_setsize
2839					 */
2840					m = bp->b_pages[i];
2841					KASSERT(m != bogus_page,
2842					    ("allocbuf: bogus page found"));
2843					while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
2844						vm_page_lock_queues();
2845
2846					bp->b_pages[i] = NULL;
2847					vm_page_unwire(m, 0);
2848				}
2849				vm_page_unlock_queues();
2850				VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2851				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2852				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2853				bp->b_npages = desiredpages;
2854			}
2855		} else if (size > bp->b_bcount) {
2856			/*
2857			 * We are growing the buffer, possibly in a
2858			 * byte-granular fashion.
2859			 */
2860			struct vnode *vp;
2861			vm_object_t obj;
2862			vm_offset_t toff;
2863			vm_offset_t tinc;
2864
2865			/*
2866			 * Step 1, bring in the VM pages from the object,
2867			 * allocating them if necessary.  We must clear
2868			 * B_CACHE if these pages are not valid for the
2869			 * range covered by the buffer.
2870			 */
2871
2872			vp = bp->b_vp;
2873			obj = bp->b_bufobj->bo_object;
2874
2875			VM_OBJECT_LOCK(obj);
2876			while (bp->b_npages < desiredpages) {
2877				vm_page_t m;
2878				vm_pindex_t pi;
2879
2880				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2881				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2882					/*
2883					 * note: must allocate system pages
2884					 * since blocking here could interfere
2885					 * with paging I/O, no matter which
2886					 * process we are.
2887					 */
2888					m = vm_page_alloc(obj, pi,
2889					    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
2890					    VM_ALLOC_WIRED);
2891					if (m == NULL) {
2892						atomic_add_int(&vm_pageout_deficit,
2893						    desiredpages - bp->b_npages);
2894						VM_OBJECT_UNLOCK(obj);
2895						VM_WAIT;
2896						VM_OBJECT_LOCK(obj);
2897					} else {
2898						if (m->valid == 0)
2899							bp->b_flags &= ~B_CACHE;
2900						bp->b_pages[bp->b_npages] = m;
2901						++bp->b_npages;
2902					}
2903					continue;
2904				}
2905
2906				/*
2907				 * We found a page.  If we have to sleep on it,
2908				 * retry because it might have gotten freed out
2909				 * from under us.
2910				 *
2911				 * We can only test VPO_BUSY here.  Blocking on
2912				 * m->busy might lead to a deadlock:
2913				 *
2914				 *  vm_fault->getpages->cluster_read->allocbuf
2915				 *
2916				 */
2917				if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
2918					continue;
2919
2920				/*
2921				 * We have a good page.
2922				 */
2923				vm_page_lock_queues();
2924				vm_page_wire(m);
2925				vm_page_unlock_queues();
2926				bp->b_pages[bp->b_npages] = m;
2927				++bp->b_npages;
2928			}
2929
2930			/*
2931			 * Step 2.  We've loaded the pages into the buffer,
2932			 * we have to figure out if we can still have B_CACHE
2933			 * set.  Note that B_CACHE is set according to the
2934			 * byte-granular range ( bcount and size ), not the
2935			 * aligned range ( newbsize ).
2936			 *
2937			 * The VM test is against m->valid, which is DEV_BSIZE
2938			 * aligned.  Needless to say, the validity of the data
2939			 * needs to also be DEV_BSIZE aligned.  Note that this
2940			 * fails with NFS if the server or some other client
2941			 * extends the file's EOF.  If our buffer is resized,
2942			 * B_CACHE may remain set! XXX
2943			 */
2944
2945			toff = bp->b_bcount;
2946			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2947
2948			while ((bp->b_flags & B_CACHE) && toff < size) {
2949				vm_pindex_t pi;
2950
2951				if (tinc > (size - toff))
2952					tinc = size - toff;
2953
2954				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2955				    PAGE_SHIFT;
2956
2957				vfs_buf_test_cache(
2958				    bp,
2959				    bp->b_offset,
2960				    toff,
2961				    tinc,
2962				    bp->b_pages[pi]
2963				);
2964				toff += tinc;
2965				tinc = PAGE_SIZE;
2966			}
2967			VM_OBJECT_UNLOCK(obj);
2968
2969			/*
2970			 * Step 3, fixup the KVM pmap.  Remember that
2971			 * bp->b_data is relative to bp->b_offset, but
2972			 * bp->b_offset may be offset into the first page.
2973			 */
2974
2975			bp->b_data = (caddr_t)
2976			    trunc_page((vm_offset_t)bp->b_data);
2977			pmap_qenter(
2978			    (vm_offset_t)bp->b_data,
2979			    bp->b_pages,
2980			    bp->b_npages
2981			);
2982
2983			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2984			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2985		}
2986	}
2987	if (newbsize < bp->b_bufsize)
2988		bufspacewakeup();
2989	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2990	bp->b_bcount = size;		/* requested buffer size	*/
2991	return 1;
2992}
2993
2994void
2995biodone(struct bio *bp)
2996{
2997	void (*done)(struct bio *);
2998
2999	mtx_lock(&bdonelock);
3000	bp->bio_flags |= BIO_DONE;
3001	done = bp->bio_done;
3002	if (done == NULL)
3003		wakeup(bp);
3004	mtx_unlock(&bdonelock);
3005	if (done != NULL)
3006		done(bp);
3007}
3008
3009/*
3010 * Wait for a BIO to finish.
3011 *
3012 * XXX: resort to a timeout for now.  The optimal locking (if any) for this
3013 * case is not yet clear.
3014 */
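/*
 * Illustrative synchronous pattern (hypothetical): a caller that
 * leaves bio_done NULL is woken directly by biodone() above and then
 * reaps the result here:
 *
 *	bip->bio_done = NULL;
 *	...hand bip to a strategy routine...
 *	error = biowait(bip, "biowt");
 */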
3015int
3016biowait(struct bio *bp, const char *wchan)
3017{
3018
3019	mtx_lock(&bdonelock);
3020	while ((bp->bio_flags & BIO_DONE) == 0)
3021		msleep(bp, &bdonelock, PRIBIO, wchan, hz / 10);
3022	mtx_unlock(&bdonelock);
3023	if (bp->bio_error != 0)
3024		return (bp->bio_error);
3025	if (!(bp->bio_flags & BIO_ERROR))
3026		return (0);
3027	return (EIO);
3028}
3029
3030void
3031biofinish(struct bio *bp, struct devstat *stat, int error)
3032{
3033
3034	if (error) {
3035		bp->bio_error = error;
3036		bp->bio_flags |= BIO_ERROR;
3037	}
3038	if (stat != NULL)
3039		devstat_end_transaction_bio(stat, bp);
3040	biodone(bp);
3041}
3042
3043/*
3044 *	bufwait:
3045 *
3046 *	Wait for buffer I/O completion, returning error status.  The buffer
3047 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3048 *	error and cleared.
3049 */
3050int
3051bufwait(struct buf *bp)
3052{
3053	if (bp->b_iocmd == BIO_READ)
3054		bwait(bp, PRIBIO, "biord");
3055	else
3056		bwait(bp, PRIBIO, "biowr");
3057	if (bp->b_flags & B_EINTR) {
3058		bp->b_flags &= ~B_EINTR;
3059		return (EINTR);
3060	}
3061	if (bp->b_ioflags & BIO_ERROR) {
3062		return (bp->b_error ? bp->b_error : EIO);
3063	} else {
3064		return (0);
3065	}
3066}
3067
3068/*
3069 * Callback function from struct bio back up to struct buf.
3070 */
3071static void
3072bufdonebio(struct bio *bip)
3073{
3074	struct buf *bp;
3075
3076	bp = bip->bio_caller2;
3077	bp->b_resid = bp->b_bcount - bip->bio_completed;
3078	bp->b_resid = bip->bio_resid;	/* XXX: remove */
3079	bp->b_ioflags = bip->bio_flags;
3080	bp->b_error = bip->bio_error;
3081	if (bp->b_error)
3082		bp->b_ioflags |= BIO_ERROR;
3083	bufdone(bp);
3084	g_destroy_bio(bip);
3085}
3086
3087void
3088dev_strategy(struct cdev *dev, struct buf *bp)
3089{
3090	struct cdevsw *csw;
3091	struct bio *bip;
3092
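
	/*
	 * Exactly one I/O command bit must be set; the second clause is
	 * the standard power-of-two test applied to b_iocmd.
	 */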
3093	if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
3094		panic("b_iocmd botch");
3095	for (;;) {
3096		bip = g_new_bio();
3097		if (bip != NULL)
3098			break;
3099		/* Try again later */
3100		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3101	}
3102	bip->bio_cmd = bp->b_iocmd;
3103	bip->bio_offset = bp->b_iooffset;
3104	bip->bio_length = bp->b_bcount;
3105	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3106	bip->bio_data = bp->b_data;
3107	bip->bio_done = bufdonebio;
3108	bip->bio_caller2 = bp;
3109	bip->bio_dev = dev;
3110	KASSERT(dev->si_refcount > 0,
3111	    ("dev_strategy on un-referenced struct cdev *(%s)",
3112	    devtoname(dev)));
3113	csw = dev_refthread(dev);
3114	if (csw == NULL) {
3115		g_destroy_bio(bip);
3116		bp->b_error = ENXIO;
3117		bp->b_ioflags = BIO_ERROR;
3118		bufdone(bp);
3119		return;
3120	}
3121	(*csw->d_strategy)(bip);
3122	dev_relthread(dev);
3123}
3124
3125/*
3126 *	bufdone:
3127 *
3128 *	Finish I/O on a buffer, optionally calling a completion function.
3129 *	This is usually called from an interrupt so process blocking is
3130 *	not allowed.
3131 *
3132 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3133 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3134 *	assuming B_INVAL is clear.
3135 *
3136 *	For the VMIO case, we set B_CACHE if the op was a read and no
3137 *	read error occurred, or if the op was a write.  B_CACHE is never
3138 *	set if the buffer is invalid or otherwise uncacheable.
3139 *
3140 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3141 *	initiator to leave B_INVAL set to brelse the buffer out of existence
3142 *	in the biodone routine.
3143 */
3144void
3145bufdone(struct buf *bp)
3146{
3147	struct bufobj *dropobj;
3148	void    (*biodone)(struct buf *);
3149
3150	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3151	dropobj = NULL;
3152
3153	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
3154	    BUF_REFCNT(bp)));
3155	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3156
3157	runningbufwakeup(bp);
3158	if (bp->b_iocmd == BIO_WRITE)
3159		dropobj = bp->b_bufobj;
3160	/* call optional completion function if requested */
3161	if (bp->b_iodone != NULL) {
3162		biodone = bp->b_iodone;
3163		bp->b_iodone = NULL;
3164		(*biodone) (bp);
3165		if (dropobj)
3166			bufobj_wdrop(dropobj);
3167		return;
3168	}
3169
3170	bufdone_finish(bp);
3171
3172	if (dropobj)
3173		bufobj_wdrop(dropobj);
3174}
3175
3176void
3177bufdone_finish(struct buf *bp)
3178{
3179	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
3180	    BUF_REFCNT(bp)));
3181
3182	if (!LIST_EMPTY(&bp->b_dep))
3183		buf_complete(bp);
3184
3185	if (bp->b_flags & B_VMIO) {
3186		int i;
3187		vm_ooffset_t foff;
3188		vm_page_t m;
3189		vm_object_t obj;
3190		int iosize;
3191		struct vnode *vp = bp->b_vp;
3192		boolean_t are_queues_locked;
3193
3194		obj = bp->b_bufobj->bo_object;
3195
3196#if defined(VFS_BIO_DEBUG)
3197		mp_fixme("usecount and vflag accessed without locks.");
3198		if (vp->v_usecount == 0) {
3199			panic("biodone: zero vnode ref count");
3200		}
3201
3202		KASSERT(vp->v_object != NULL,
3203			("biodone: vnode %p has no vm_object", vp));
3204#endif
3205
3206		foff = bp->b_offset;
3207		KASSERT(bp->b_offset != NOOFFSET,
3208		    ("biodone: no buffer offset"));
3209
3210		VM_OBJECT_LOCK(obj);
3211#if defined(VFS_BIO_DEBUG)
3212		if (obj->paging_in_progress < bp->b_npages) {
3213			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
3214			    obj->paging_in_progress, bp->b_npages);
3215		}
3216#endif
3217
3218		/*
3219		 * Set B_CACHE if the op was a normal read and no error
3220		 * occurred.  B_CACHE is set for writes in the b*write()
3221		 * routines.
3222		 */
3223		iosize = bp->b_bcount - bp->b_resid;
3224		if (bp->b_iocmd == BIO_READ &&
3225		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3226		    !(bp->b_ioflags & BIO_ERROR)) {
3227			bp->b_flags |= B_CACHE;
3228		}
3229		if (bp->b_iocmd == BIO_READ) {
3230			vm_page_lock_queues();
3231			are_queues_locked = TRUE;
3232		} else
3233			are_queues_locked = FALSE;
3234		for (i = 0; i < bp->b_npages; i++) {
3235			int bogusflag = 0;
3236			int resid;
3237
3238			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3239			if (resid > iosize)
3240				resid = iosize;
3241
3242			/*
3243			 * cleanup bogus pages, restoring the originals
3244			 */
3245			m = bp->b_pages[i];
3246			if (m == bogus_page) {
3247				bogusflag = 1;
3248				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3249				if (m == NULL)
3250					panic("biodone: page disappeared!");
3251				bp->b_pages[i] = m;
3252				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3253				    bp->b_pages, bp->b_npages);
3254			}
3255#if defined(VFS_BIO_DEBUG)
3256			if (OFF_TO_IDX(foff) != m->pindex) {
3257				printf(
3258"biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
3259				    (intmax_t)foff, (uintmax_t)m->pindex);
3260			}
3261#endif
3262
3263			/*
3264			 * In the write case, the valid and clean bits are
3265			 * already changed correctly ( see bdwrite() ), so we
3266			 * only need to do this here in the read case.
3267			 */
3268			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3269				vfs_page_set_valid(bp, foff, m);
3270			}
3271
3272			/*
3273			 * When debugging new filesystems or buffer I/O methods, this
3274			 * is the most common error that pops up.  If you see this, you
3275			 * have not set the page busy flag correctly!!!
3276			 */
3277			if (m->busy == 0) {
3278				printf("biodone: page not busy, "
3279				    "pindex: %d, foff: 0x(%x,%x), "
3280				    "resid: %d, index: %d\n",
3281				    (int) m->pindex, (int)(foff >> 32),
3282						(int) foff & 0xffffffff, resid, i);
3283				if (!vn_isdisk(vp, NULL))
3284					printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
3285					    (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
3286					    (intmax_t) bp->b_lblkno,
3287					    bp->b_flags, bp->b_npages);
3288				else
3289					printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
3290					    (intmax_t) bp->b_lblkno,
3291					    bp->b_flags, bp->b_npages);
3292				printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
3293				    (u_long)m->valid, (u_long)m->dirty,
3294				    m->wire_count);
3295				panic("biodone: page not busy");
3296			}
3297			vm_page_io_finish(m);
3298			vm_object_pip_subtract(obj, 1);
3299			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3300			iosize -= resid;
3301		}
3302		if (are_queues_locked)
3303			vm_page_unlock_queues();
3304		vm_object_pip_wakeupn(obj, 0);
3305		VM_OBJECT_UNLOCK(obj);
3306	}
3307
3308	/*
3309	 * For asynchronous completions, release the buffer now. The brelse
3310	 * will do a wakeup there if necessary - so no need to do a wakeup
3311	 * here in the async case. The sync case always needs to do a wakeup.
3312	 */
3313
3314	if (bp->b_flags & B_ASYNC) {
3315		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3316			brelse(bp);
3317		else
3318			bqrelse(bp);
3319	} else
3320		bdone(bp);
3321}
3322
3323/*
3324 * This routine is called in lieu of iodone in the case of
3325 * incomplete I/O.  This keeps the busy status for pages
3326 * consistent.
3327 */
3328void
3329vfs_unbusy_pages(struct buf *bp)
3330{
3331	int i;
3332	vm_object_t obj;
3333	vm_page_t m;
3334
3335	runningbufwakeup(bp);
3336	if (!(bp->b_flags & B_VMIO))
3337		return;
3338
3339	obj = bp->b_bufobj->bo_object;
3340	VM_OBJECT_LOCK(obj);
3341	for (i = 0; i < bp->b_npages; i++) {
3342		m = bp->b_pages[i];
3343		if (m == bogus_page) {
3344			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3345			if (!m)
3346				panic("vfs_unbusy_pages: page missing");
3347			bp->b_pages[i] = m;
3348			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3349			    bp->b_pages, bp->b_npages);
3350		}
3351		vm_object_pip_subtract(obj, 1);
3352		vm_page_io_finish(m);
3353	}
3354	vm_object_pip_wakeupn(obj, 0);
3355	VM_OBJECT_UNLOCK(obj);
3356}
3357
3358/*
3359 * vfs_page_set_valid:
3360 *
3361 *	Set the valid bits in a page based on the supplied offset.   The
3362 *	range is restricted to the buffer's size.
3363 *
3364 *	This routine is typically called after a read completes.
3365 */
3366static void
3367vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3368{
3369	vm_ooffset_t soff, eoff;
3370
3371	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3372	/*
3373	 * Start and end offsets in buffer.  eoff - soff may not cross a
3374	 * page boundary or cross the end of the buffer.  The end of the
3375	 * buffer, in this case, is our file EOF, not the allocation size
3376	 * of the buffer.
3377	 */
3378	soff = off;
3379	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3380	if (eoff > bp->b_offset + bp->b_bcount)
3381		eoff = bp->b_offset + bp->b_bcount;
3382
3383	/*
3384	 * Set valid range.  This is typically the entire buffer and thus the
3385	 * entire page.
3386	 */
3387	if (eoff > soff) {
3388		vm_page_set_validclean(
3389		    m,
3390		   (vm_offset_t) (soff & PAGE_MASK),
3391		   (vm_offset_t) (eoff - soff)
3392		);
3393	}
3394}
3395
3396/*
3397 * This routine is called before a device strategy routine.
3398 * It is used to tell the VM system that paging I/O is in
3399 * progress, and treat the pages associated with the buffer
3400 * almost as being VPO_BUSY.  Also the object paging_in_progress
3401 * flag is handled to make sure that the object doesn't become
3402 * inconsistant.
3403 * inconsistent.
3404 *
3405 * Since I/O has not been initiated yet, certain buffer flags
3406 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3407 */
3408void
3409vfs_busy_pages(struct buf *bp, int clear_modify)
3410{
3411	int i, bogus;
3412	vm_object_t obj;
3413	vm_ooffset_t foff;
3414	vm_page_t m;
3415
3416	if (!(bp->b_flags & B_VMIO))
3417		return;
3418
3419	obj = bp->b_bufobj->bo_object;
3420	foff = bp->b_offset;
3421	KASSERT(bp->b_offset != NOOFFSET,
3422	    ("vfs_busy_pages: no buffer offset"));
3423	VM_OBJECT_LOCK(obj);
3424	if (bp->b_bufsize != 0)
3425		vfs_setdirty_locked_object(bp);
3426retry:
3427	for (i = 0; i < bp->b_npages; i++) {
3428		m = bp->b_pages[i];
3429
3430		if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
3431			goto retry;
3432	}
3433	bogus = 0;
3434	vm_page_lock_queues();
3435	for (i = 0; i < bp->b_npages; i++) {
3436		m = bp->b_pages[i];
3437
3438		if ((bp->b_flags & B_CLUSTER) == 0) {
3439			vm_object_pip_add(obj, 1);
3440			vm_page_io_start(m);
3441		}
3442		/*
3443		 * When readying a buffer for a read ( i.e.
3444		 * clear_modify == 0 ), it is important to do
3445		 * bogus_page replacement for valid pages in
3446		 * partially instantiated buffers.  Partially
3447		 * instantiated buffers can, in turn, occur when
3448		 * reconstituting a buffer from its VM backing store
3449		 * base.  We only have to do this if B_CACHE is
3450		 * clear ( which causes the I/O to occur in the
3451		 * first place ).  The replacement prevents the read
3452		 * I/O from overwriting potentially dirty VM-backed
3453		 * pages.  XXX bogus page replacement is, uh, bogus.
3454		 * It may not work properly with small-block devices.
3455		 * We need to find a better way.
3456		 */
3457		pmap_remove_all(m);
3458		if (clear_modify)
3459			vfs_page_set_valid(bp, foff, m);
3460		else if (m->valid == VM_PAGE_BITS_ALL &&
3461		    (bp->b_flags & B_CACHE) == 0) {
3462			bp->b_pages[i] = bogus_page;
3463			bogus++;
3464		}
3465		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3466	}
3467	vm_page_unlock_queues();
3468	VM_OBJECT_UNLOCK(obj);
3469	if (bogus)
3470		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3471		    bp->b_pages, bp->b_npages);
3472}
3473
3474/*
3475 * Tell the VM system that the pages associated with this buffer
3476 * are clean.  This is used for delayed writes where the data is
3477 * going to go to disk eventually without additional VM intervention.
3478 *
3479 * Note that while we only really need to clean through to b_bcount, we
3480 * just go ahead and clean through to b_bufsize.
3481 */
3482static void
3483vfs_clean_pages(struct buf *bp)
3484{
3485	int i;
3486	vm_ooffset_t foff, noff, eoff;
3487	vm_page_t m;
3488
3489	if (!(bp->b_flags & B_VMIO))
3490		return;
3491
3492	foff = bp->b_offset;
3493	KASSERT(bp->b_offset != NOOFFSET,
3494	    ("vfs_clean_pages: no buffer offset"));
3495	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3496	vm_page_lock_queues();
3497	for (i = 0; i < bp->b_npages; i++) {
3498		m = bp->b_pages[i];
3499		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3500		eoff = noff;
3501
3502		if (eoff > bp->b_offset + bp->b_bufsize)
3503			eoff = bp->b_offset + bp->b_bufsize;
3504		vfs_page_set_valid(bp, foff, m);
3505		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3506		foff = noff;
3507	}
3508	vm_page_unlock_queues();
3509	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3510}
3511
3512/*
3513 *	vfs_bio_set_validclean:
3514 *
3515 *	Set the range within the buffer to valid and clean.  The range is
3516 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
3517 *	itself may be offset from the beginning of the first page.
3518 *
3519 */
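/*
 * Worked example (illustrative, PAGE_SIZE == 4096): with
 * (b_offset & PAGE_MASK) == 0x800 and base == 0, the fixup below
 * yields base == 0x800, so the first iteration can validate at most
 * PAGE_SIZE - 0x800 == 2048 bytes before n resets to a full page.
 */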
3520
3521void
3522vfs_bio_set_validclean(struct buf *bp, int base, int size)
3523{
3524	int i, n;
3525	vm_page_t m;
3526
3527	if (!(bp->b_flags & B_VMIO))
3528		return;
3529	/*
3530	 * Fixup base to be relative to beginning of first page.
3531	 * Set initial n to be the maximum number of bytes in the
3532	 * first page that can be validated.
3533	 */
3534
3535	base += (bp->b_offset & PAGE_MASK);
3536	n = PAGE_SIZE - (base & PAGE_MASK);
3537
3538	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3539	vm_page_lock_queues();
3540	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3541		m = bp->b_pages[i];
3542		if (n > size)
3543			n = size;
3544		vm_page_set_validclean(m, base & PAGE_MASK, n);
3545		base += n;
3546		size -= n;
3547		n = PAGE_SIZE;
3548	}
3549	vm_page_unlock_queues();
3550	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3551}
3552
3553/*
3554 *	vfs_bio_clrbuf:
3555 *
3556 *	clear a buffer.  This routine essentially fakes an I/O, so we need
3557 *	to clear BIO_ERROR and B_INVAL.
3558 *
3559 *	Note that while we only theoretically need to clear through b_bcount,
3560 *	we go ahead and clear through b_bufsize.
3561 */
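/*
 * Worked example (illustrative, DEV_BSIZE == 512): for a 2048-byte
 * buffer in a single page, mask == (1 << (2048 / 512)) - 1 == 0xf,
 * i.e. one m->valid bit per DEV_BSIZE chunk the buffer covers.
 */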
3562
3563void
3564vfs_bio_clrbuf(struct buf *bp)
3565{
3566	int i, j, mask = 0;
3567	caddr_t sa, ea;
3568
3569	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
3570		clrbuf(bp);
3571		return;
3572	}
3573
3574	bp->b_flags &= ~B_INVAL;
3575	bp->b_ioflags &= ~BIO_ERROR;
3576	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3577	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3578	    (bp->b_offset & PAGE_MASK) == 0) {
3579		if (bp->b_pages[0] == bogus_page)
3580			goto unlock;
3581		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3582		VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
3583		if ((bp->b_pages[0]->valid & mask) == mask)
3584			goto unlock;
3585		if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
3586		    ((bp->b_pages[0]->valid & mask) == 0)) {
3587			bzero(bp->b_data, bp->b_bufsize);
3588			bp->b_pages[0]->valid |= mask;
3589			goto unlock;
3590		}
3591	}
3592	ea = sa = bp->b_data;
3593	for(i = 0; i < bp->b_npages; i++, sa = ea) {
3594		ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3595		ea = (caddr_t)(vm_offset_t)ulmin(
3596		    (u_long)(vm_offset_t)ea,
3597		    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3598		if (bp->b_pages[i] == bogus_page)
3599			continue;
3600		j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3601		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3602		VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
3603		if ((bp->b_pages[i]->valid & mask) == mask)
3604			continue;
3605		if ((bp->b_pages[i]->valid & mask) == 0) {
3606			if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
3607				bzero(sa, ea - sa);
3608		} else {
3609			for (; sa < ea; sa += DEV_BSIZE, j++) {
3610				if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
3611				    (bp->b_pages[i]->valid & (1 << j)) == 0)
3612					bzero(sa, DEV_BSIZE);
3613			}
3614		}
3615		bp->b_pages[i]->valid |= mask;
3616	}
3617unlock:
3618	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3619	bp->b_resid = 0;
3620}
3621
3622/*
3623 * vm_hold_load_pages and vm_hold_free_pages get pages into
3624 * a buffers address space.  The pages are anonymous and are
3625 * not associated with a file object.
3626 */
3627static void
3628vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3629{
3630	vm_offset_t pg;
3631	vm_page_t p;
3632	int index;
3633
3634	to = round_page(to);
3635	from = round_page(from);
3636	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3637
3638	VM_OBJECT_LOCK(kernel_object);
3639	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3640tryagain:
3641		/*
3642		 * note: must allocate system pages since blocking here
3643		 * could interfere with paging I/O, no matter which
3644		 * process we are.
3645		 */
3646		p = vm_page_alloc(kernel_object,
3647			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
3648		    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
3649		if (!p) {
3650			atomic_add_int(&vm_pageout_deficit,
3651			    (to - pg) >> PAGE_SHIFT);
3652			VM_OBJECT_UNLOCK(kernel_object);
3653			VM_WAIT;
3654			VM_OBJECT_LOCK(kernel_object);
3655			goto tryagain;
3656		}
3657		p->valid = VM_PAGE_BITS_ALL;
3658		pmap_qenter(pg, &p, 1);
3659		bp->b_pages[index] = p;
3660	}
3661	VM_OBJECT_UNLOCK(kernel_object);
3662	bp->b_npages = index;
3663}
3664
3665/* Return pages associated with this buf to the vm system */
3666static void
3667vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3668{
3669	vm_offset_t pg;
3670	vm_page_t p;
3671	int index, newnpages;
3672
3673	from = round_page(from);
3674	to = round_page(to);
3675	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3676
3677	VM_OBJECT_LOCK(kernel_object);
3678	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3679		p = bp->b_pages[index];
3680		if (p && (index < bp->b_npages)) {
3681			if (p->busy) {
3682				printf(
3683			    "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
3684				    (intmax_t)bp->b_blkno,
3685				    (intmax_t)bp->b_lblkno);
3686			}
3687			bp->b_pages[index] = NULL;
3688			pmap_qremove(pg, 1);
3689			vm_page_lock_queues();
3690			vm_page_unwire(p, 0);
3691			vm_page_free(p);
3692			vm_page_unlock_queues();
3693		}
3694	}
3695	VM_OBJECT_UNLOCK(kernel_object);
3696	bp->b_npages = newnpages;
3697}
3698
3699/*
3700 * Map an I/O request into kernel virtual address space.
3701 *
3702 * All requests are (re)mapped into kernel VA space.
3703 * Notice that we use b_bufsize for the size of the buffer
3704 * to be mapped.  b_bcount might be modified by the driver.
3705 *
3706 * Note that even if the caller determines that the address space should
3707 * be valid, a race or a smaller file mapped into a larger space may
3708 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
3709 * check the return value.
3710 */
3711int
3712vmapbuf(struct buf *bp)
3713{
3714	caddr_t addr, kva;
3715	vm_prot_t prot;
3716	int pidx, i;
3717	struct vm_page *m;
3718	struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
3719
3720	if (bp->b_bufsize < 0)
3721		return (-1);
3722	prot = VM_PROT_READ;
3723	if (bp->b_iocmd == BIO_READ)
3724		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
3725	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
3726	     addr < bp->b_data + bp->b_bufsize;
3727	     addr += PAGE_SIZE, pidx++) {
3728		/*
3729		 * Do the vm_fault if needed; do the copy-on-write thing
3730		 * when reading stuff off device into memory.
3731		 *
3732		 * NOTE! Must use pmap_extract_and_hold() because addr may be in
3733		 * the userland address space, and kextract is only guaranteed
3734		 * to work for the kernel address space (see: sparc64 port).
3735		 */
3736retry:
3737		if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
3738		    prot) < 0) {
3739			vm_page_lock_queues();
3740			for (i = 0; i < pidx; ++i) {
3741				vm_page_unhold(bp->b_pages[i]);
3742				bp->b_pages[i] = NULL;
3743			}
3744			vm_page_unlock_queues();
3745			return (-1);
3746		}
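		/*
		 * The page may have been recycled or its mapping lost
		 * between the fault above and the lookup below; if the
		 * lookup fails, fault the page in again.
		 */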
3747		m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
3748		if (m == NULL)
3749			goto retry;
3750		bp->b_pages[pidx] = m;
3751	}
3752	if (pidx > btoc(MAXPHYS))
3753		panic("vmapbuf: mapped more than MAXPHYS");
3754	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
3755
3756	kva = bp->b_saveaddr;
3757	bp->b_npages = pidx;
3758	bp->b_saveaddr = bp->b_data;
3759	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
3760	return (0);
3761}
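
/*
 * Example caller (a minimal sketch modeled on physio(9); the names and
 * the surrounding error handling are illustrative, not a verbatim copy):
 *
 *	bp->b_data = uio->uio_iov[i].iov_base;
 *	bp->b_bufsize = uio->uio_iov[i].iov_len;
 *	if (vmapbuf(bp) < 0) {
 *		error = EFAULT;
 *		goto doerror;
 *	}
 *	dev_strategy(dev, bp);
 *	bwait(bp, PRIBIO, "physio");
 *	vunmapbuf(bp);
 */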
3762
3763/*
3764 * Free the I/O map PTEs associated with this I/O operation.
3765 * We also invalidate the TLB entries and restore the original b_data.
3766 */
3767void
3768vunmapbuf(struct buf *bp)
3769{
3770	int pidx;
3771	int npages;
3772
3773	npages = bp->b_npages;
3774	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
3775	vm_page_lock_queues();
3776	for (pidx = 0; pidx < npages; pidx++)
3777		vm_page_unhold(bp->b_pages[pidx]);
3778	vm_page_unlock_queues();
3779
3780	bp->b_data = bp->b_saveaddr;
3781}
3782
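/*
 * bdone() marks a buffer done under bdonelock and wakes any thread
 * sleeping on the buffer in bwait() below.
 */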
3783void
3784bdone(struct buf *bp)
3785{
3786
3787	mtx_lock(&bdonelock);
3788	bp->b_flags |= B_DONE;
3789	wakeup(bp);
3790	mtx_unlock(&bdonelock);
3791}
3792
3793void
3794bwait(struct buf *bp, u_char pri, const char *wchan)
3795{
3796
3797	mtx_lock(&bdonelock);
3798	while ((bp->b_flags & B_DONE) == 0)
3799		msleep(bp, &bdonelock, pri, wchan, 0);
3800	mtx_unlock(&bdonelock);
3801}
3802
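/*
 * Default bufobj sync method: fsync the vnode behind the bufobj.
 */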
3803int
3804bufsync(struct bufobj *bo, int waitfor, struct thread *td)
3805{
3806
3807	return (VOP_FSYNC(bo->__bo_vnode, waitfor, td));
3808}
3809
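/*
 * Default bufobj strategy method for regular vnodes: pass the buffer
 * to the vnode's VOP_STRATEGY.  Device vnodes must not come through
 * here, hence the VCHR/VBLK assertion.
 */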
3810void
3811bufstrategy(struct bufobj *bo, struct buf *bp)
3812{
3813	int i = 0;
3814	struct vnode *vp;
3815
3816	vp = bp->b_vp;
3817	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
3818	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
3819	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
3820	i = VOP_STRATEGY(vp, bp);
3821	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
3822}
3823
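/*
 * bufobj_wref() and bufobj_wrefl() record the start of a write on the
 * bufobj; the "l" variant is for callers that already hold the bufobj
 * lock.  Each reference must eventually be retired by bufobj_wdrop().
 */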
3824void
3825bufobj_wrefl(struct bufobj *bo)
3826{
3827
3828	KASSERT(bo != NULL, ("NULL bo in bufobj_wrefl"));
3829	ASSERT_BO_LOCKED(bo);
3830	bo->bo_numoutput++;
3831}
3832
3833void
3834bufobj_wref(struct bufobj *bo)
3835{
3836
3837	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
3838	BO_LOCK(bo);
3839	bo->bo_numoutput++;
3840	BO_UNLOCK(bo);
3841}
3842
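/*
 * bufobj_wdrop() retires one in-flight write and, when the count
 * drains to zero, wakes any thread sleeping in bufobj_wwait().
 */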
3843void
3844bufobj_wdrop(struct bufobj *bo)
3845{
3846
3847	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
3848	BO_LOCK(bo);
3849	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
3850	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
3851		bo->bo_flag &= ~BO_WWAIT;
3852		wakeup(&bo->bo_numoutput);
3853	}
3854	BO_UNLOCK(bo);
3855}
3856
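/*
 * bufobj_wwait() sleeps, with the bufobj lock held, until every write
 * accounted by bufobj_wref() has been retired by bufobj_wdrop().
 */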
3857int
3858bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
3859{
3860	int error;
3861
3862	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
3863	ASSERT_BO_LOCKED(bo);
3864	error = 0;
3865	while (bo->bo_numoutput) {
3866		bo->bo_flag |= BO_WWAIT;
3867		error = msleep(&bo->bo_numoutput, BO_MTX(bo),
3868		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
3869		if (error)
3870			break;
3871	}
3872	return (error);
3873}
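
/*
 * Typical use of the write accounting above (a sketch; locking of the
 * I/O path itself and error handling are the caller's concern):
 *
 *	bufobj_wref(bo);		(a write is being started)
 *	... issue the write; the completion path, e.g. bufdone(),
 *	    calls bufobj_wdrop() ...
 *
 *	BO_LOCK(bo);			(later: drain all writes)
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */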
3874
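/*
 * bpin() and bunpin() adjust a buffer's pin count under bpinlock;
 * bunpin() wakes sleepers once the count reaches zero, and
 * bunpin_wait() sleeps until the buffer is fully unpinned.
 */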
3875void
3876bpin(struct buf *bp)
3877{
3878	mtx_lock(&bpinlock);
3879	bp->b_pin_count++;
3880	mtx_unlock(&bpinlock);
3881}
3882
3883void
3884bunpin(struct buf *bp)
3885{
3886	mtx_lock(&bpinlock);
3887	if (--bp->b_pin_count == 0)
3888		wakeup(bp);
3889	mtx_unlock(&bpinlock);
3890}
3891
3892void
3893bunpin_wait(struct buf *bp)
3894{
3895	mtx_lock(&bpinlock);
3896	while (bp->b_pin_count > 0)
3897		msleep(bp, &bpinlock, PRIBIO, "bwunpin", 0);
3898	mtx_unlock(&bpinlock);
3899}
3900
3901#include "opt_ddb.h"
3902#ifdef DDB
3903#include <ddb/ddb.h>
3904
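/*
 * Example usage from the in-kernel debugger prompt (the buffer address
 * is illustrative):
 *
 *	db> show buffer 0xc40bf000
 *	db> show lockedbufs
 */
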
3905/* DDB command to show buffer data */
3906DB_SHOW_COMMAND(buffer, db_show_buffer)
3907{
3908	/* get args */
3909	struct buf *bp = (struct buf *)addr;
3910
3911	if (!have_addr) {
3912		db_printf("usage: show buffer <addr>\n");
3913		return;
3914	}
3915
3916	db_printf("buf at %p\n", bp);
3917	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
3918	db_printf(
3919	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
3920	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd\n",
3921	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3922	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno);
3923	if (bp->b_npages) {
3924		int i;
3925		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3926		for (i = 0; i < bp->b_npages; i++) {
3927			vm_page_t m;
3928			m = bp->b_pages[i];
3929			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3930			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3931			if ((i + 1) < bp->b_npages)
3932				db_printf(",");
3933		}
3934		db_printf("\n");
3935	}
3936	lockmgr_printinfo(&bp->b_lock);
3937}
3938
3939DB_SHOW_COMMAND(lockedbufs, lockedbufs)
3940{
3941	struct buf *bp;
3942	int i;
3943
3944	for (i = 0; i < nbuf; i++) {
3945		bp = &buf[i];
3946		if (lockcount(&bp->b_lock)) {
3947			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
3948			db_printf("\n");
3949		}
3950	}
3951}
3952#endif /* DDB */
3953