1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2004 Poul-Henning Kamp
5 * Copyright (c) 1994,1997 John S. Dyson
6 * Copyright (c) 2013 The FreeBSD Foundation
7 * All rights reserved.
8 *
9 * Portions of this software were developed by Konstantin Belousov
10 * under sponsorship from the FreeBSD Foundation.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34/*
35 * this file contains a new buffer I/O scheme implementing a coherent
36 * VM object and buffer cache scheme.  Pains have been taken to make
37 * sure that the performance degradation associated with schemes such
38 * as this is not realized.
39 *
40 * Author:  John S. Dyson
41 * Significant help during the development and debugging phases
42 * had been provided by David Greenman, also of the FreeBSD core team.
43 *
44 * see man buf(9) for more info.
45 */
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/asan.h>
50#include <sys/bio.h>
51#include <sys/bitset.h>
52#include <sys/boottrace.h>
53#include <sys/buf.h>
54#include <sys/conf.h>
55#include <sys/counter.h>
56#include <sys/devicestat.h>
57#include <sys/eventhandler.h>
58#include <sys/fail.h>
59#include <sys/ktr.h>
60#include <sys/limits.h>
61#include <sys/lock.h>
62#include <sys/malloc.h>
63#include <sys/memdesc.h>
64#include <sys/mount.h>
65#include <sys/mutex.h>
66#include <sys/kernel.h>
67#include <sys/kthread.h>
68#include <sys/pctrie.h>
69#include <sys/proc.h>
70#include <sys/racct.h>
71#include <sys/refcount.h>
72#include <sys/resourcevar.h>
73#include <sys/rwlock.h>
74#include <sys/sched.h>
75#include <sys/smp.h>
76#include <sys/sysctl.h>
77#include <sys/syscallsubr.h>
78#include <sys/vmem.h>
79#include <sys/vmmeter.h>
80#include <sys/vnode.h>
81#include <sys/watchdog.h>
82#include <geom/geom.h>
83#include <vm/vm.h>
84#include <vm/vm_param.h>
85#include <vm/vm_kern.h>
86#include <vm/vm_object.h>
87#include <vm/vm_page.h>
88#include <vm/vm_pageout.h>
89#include <vm/vm_pager.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_map.h>
92#include <vm/swap_pager.h>
93
94static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
95
96struct	bio_ops bioops;		/* I/O operation notification */
97
98struct	buf_ops buf_ops_bio = {
99	.bop_name	=	"buf_ops_bio",
100	.bop_write	=	bufwrite,
101	.bop_strategy	=	bufstrategy,
102	.bop_sync	=	bufsync,
103	.bop_bdflush	=	bufbdflush,
104};
105
106struct bufqueue {
107	struct mtx_padalign	bq_lock;
108	TAILQ_HEAD(, buf)	bq_queue;
109	uint8_t			bq_index;
110	uint16_t		bq_subqueue;
111	int			bq_len;
112} __aligned(CACHE_LINE_SIZE);
113
114#define	BQ_LOCKPTR(bq)		(&(bq)->bq_lock)
115#define	BQ_LOCK(bq)		mtx_lock(BQ_LOCKPTR((bq)))
116#define	BQ_UNLOCK(bq)		mtx_unlock(BQ_LOCKPTR((bq)))
117#define	BQ_ASSERT_LOCKED(bq)	mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
118
119struct bufdomain {
120	struct bufqueue	*bd_subq;
121	struct bufqueue bd_dirtyq;
122	struct bufqueue	*bd_cleanq;
123	struct mtx_padalign bd_run_lock;
124	/* Constants */
125	long		bd_maxbufspace;
126	long		bd_hibufspace;
127	long 		bd_lobufspace;
128	long 		bd_bufspacethresh;
129	int		bd_hifreebuffers;
130	int		bd_lofreebuffers;
131	int		bd_hidirtybuffers;
132	int		bd_lodirtybuffers;
133	int		bd_dirtybufthresh;
134	int		bd_lim;
135	/* atomics */
136	int		bd_wanted;
137	bool		bd_shutdown;
138	int __aligned(CACHE_LINE_SIZE)	bd_numdirtybuffers;
139	int __aligned(CACHE_LINE_SIZE)	bd_running;
140	long __aligned(CACHE_LINE_SIZE) bd_bufspace;
141	int __aligned(CACHE_LINE_SIZE)	bd_freebuffers;
142} __aligned(CACHE_LINE_SIZE);
143
144#define	BD_LOCKPTR(bd)		(&(bd)->bd_cleanq->bq_lock)
145#define	BD_LOCK(bd)		mtx_lock(BD_LOCKPTR((bd)))
146#define	BD_UNLOCK(bd)		mtx_unlock(BD_LOCKPTR((bd)))
147#define	BD_ASSERT_LOCKED(bd)	mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
148#define	BD_RUN_LOCKPTR(bd)	(&(bd)->bd_run_lock)
149#define	BD_RUN_LOCK(bd)		mtx_lock(BD_RUN_LOCKPTR((bd)))
150#define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
151#define	BD_DOMAIN(bd)		(bd - bdomain)
152
153static char *buf;		/* buffer header pool */
154static struct buf *
155nbufp(unsigned i)
156{
157	return ((struct buf *)(buf + (sizeof(struct buf) +
158	    sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
159}
160
161caddr_t __read_mostly unmapped_buf;
162#ifdef INVARIANTS
163caddr_t	poisoned_buf = (void *)-1;
164#endif
165
166/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
167struct proc *bufdaemonproc;
168
169static void vm_hold_free_pages(struct buf *bp, int newbsize);
170static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
171		vm_offset_t to);
172static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
173static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
174		vm_page_t m);
175static void vfs_clean_pages_dirty_buf(struct buf *bp);
176static void vfs_setdirty_range(struct buf *bp);
177static void vfs_vmio_invalidate(struct buf *bp);
178static void vfs_vmio_truncate(struct buf *bp, int npages);
179static void vfs_vmio_extend(struct buf *bp, int npages, int size);
180static int vfs_bio_clcheck(struct vnode *vp, int size,
181		daddr_t lblkno, daddr_t blkno);
182static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
183		void (*)(struct buf *));
184static int buf_flush(struct vnode *vp, struct bufdomain *, int);
185static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
186static void buf_daemon(void);
187static __inline void bd_wakeup(void);
188static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
189static void bufkva_reclaim(vmem_t *, int);
190static void bufkva_free(struct buf *);
191static int buf_import(void *, void **, int, int, int);
192static void buf_release(void *, void **, int);
193static void maxbcachebuf_adjust(void);
194static inline struct bufdomain *bufdomain(struct buf *);
195static void bq_remove(struct bufqueue *bq, struct buf *bp);
196static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
197static int buf_recycle(struct bufdomain *, bool kva);
198static void bq_init(struct bufqueue *bq, int qindex, int cpu,
199	    const char *lockname);
200static void bd_init(struct bufdomain *bd);
201static int bd_flushall(struct bufdomain *bd);
202static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
203static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
204
205static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
206int vmiodirenable = TRUE;
207SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
208    "Use the VM system for directory writes");
209long runningbufspace;
210SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
211    "Amount of presently outstanding async buffer io");
212SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
213    NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
214static counter_u64_t bufkvaspace;
215SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
216    "Kernel virtual memory used for buffers");
217static long maxbufspace;
218SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
219    CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
220    __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
221    "Maximum allowed value of bufspace (including metadata)");
222static long bufmallocspace;
223SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
224    "Amount of malloced memory for buffers");
225static long maxbufmallocspace;
226SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
227    0, "Maximum amount of malloced memory for buffers");
228static long lobufspace;
229SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
230    CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
231    __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
232    "Minimum amount of buffers we want to have");
233long hibufspace;
234SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
235    CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
236    __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
237    "Maximum allowed value of bufspace (excluding metadata)");
238long bufspacethresh;
239SYSCTL_PROC(_vfs, OID_AUTO, bufspacethresh,
240    CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
241    __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
242    "Bufspace consumed before waking the daemon to free some");
243static counter_u64_t buffreekvacnt;
244SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
245    "Number of times we have freed the KVA space from some buffer");
246static counter_u64_t bufdefragcnt;
247SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
248    "Number of times we have had to repeat buffer allocation to defragment");
249static long lorunningspace;
250SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
251    CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
252    "Minimum preferred space used for in-progress I/O");
253static long hirunningspace;
254SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
255    CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
256    "Maximum amount of space to use for in-progress I/O");
257int dirtybufferflushes;
258SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
259    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
260int bdwriteskip;
261SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
262    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
263int altbufferflushes;
264SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW | CTLFLAG_STATS,
265    &altbufferflushes, 0, "Number of fsync flushes to limit dirty buffers");
266static int recursiveflushes;
267SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW | CTLFLAG_STATS,
268    &recursiveflushes, 0, "Number of flushes skipped due to being recursive");
269static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
270SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
271    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
272    "Number of buffers that are dirty (has unwritten changes) at the moment");
273static int lodirtybuffers;
274SYSCTL_PROC(_vfs, OID_AUTO, lodirtybuffers,
275    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
276    __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
277    "How many buffers we want to have free before bufdaemon can sleep");
278static int hidirtybuffers;
279SYSCTL_PROC(_vfs, OID_AUTO, hidirtybuffers,
280    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
281    __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
282    "When the number of dirty buffers is considered severe");
283int dirtybufthresh;
284SYSCTL_PROC(_vfs, OID_AUTO, dirtybufthresh,
285    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
286    __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
287    "Number of bdwrite to bawrite conversions to clear dirty buffers");
288static int numfreebuffers;
289SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
290    "Number of free buffers");
291static int lofreebuffers;
292SYSCTL_PROC(_vfs, OID_AUTO, lofreebuffers,
293    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
294    __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
295   "Target number of free buffers");
296static int hifreebuffers;
297SYSCTL_PROC(_vfs, OID_AUTO, hifreebuffers,
298    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
299    __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
300   "Threshold for clean buffer recycling");
301static counter_u64_t getnewbufcalls;
302SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
303   &getnewbufcalls, "Number of calls to getnewbuf");
304static counter_u64_t getnewbufrestarts;
305SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
306    &getnewbufrestarts,
307    "Number of times getnewbuf has had to restart a buffer acquisition");
308static counter_u64_t mappingrestarts;
309SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
310    &mappingrestarts,
311    "Number of times getblk has had to restart a buffer mapping for "
312    "unmapped buffer");
313static counter_u64_t numbufallocfails;
314SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
315    &numbufallocfails, "Number of times buffer allocations failed");
316static int flushbufqtarget = 100;
317SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
318    "Amount of work to do in flushbufqueues when helping bufdaemon");
319static counter_u64_t notbufdflushes;
320SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
321    "Number of dirty buffer flushes done by the bufdaemon helpers");
322static long barrierwrites;
323SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW | CTLFLAG_STATS,
324    &barrierwrites, 0, "Number of barrier writes");
325SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed,
326    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
327    &unmapped_buf_allowed, 0,
328    "Permit the use of the unmapped i/o");
329int maxbcachebuf = MAXBCACHEBUF;
330SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
331    "Maximum size of a buffer cache block");
332
333/*
334 * This lock synchronizes access to bd_request.
335 */
336static struct mtx_padalign __exclusive_cache_line bdlock;
337
338/*
339 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
340 * waitrunningbufspace().
341 */
342static struct mtx_padalign __exclusive_cache_line rbreqlock;
343
344/*
345 * Lock that protects bdirtywait.
346 */
347static struct mtx_padalign __exclusive_cache_line bdirtylock;
348
349/*
350 * bufdaemon shutdown request and sleep channel.
351 */
352static bool bd_shutdown;
353
354/*
355 * Wakeup point for bufdaemon, as well as indicator of whether it is already
356 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
357 * is idling.
358 */
359static int bd_request;
360
361/*
362 * Request for the buf daemon to write more buffers than is indicated by
363 * lodirtybuf.  This may be necessary to push out excess dependencies or
364 * defragment the address space where a simple count of the number of dirty
365 * buffers is insufficient to characterize the demand for flushing them.
366 */
367static int bd_speedupreq;
368
369/*
370 * Synchronization (sleep/wakeup) variable for active buffer space requests.
371 * Set when wait starts, cleared prior to wakeup().
372 * Used in runningbufwakeup() and waitrunningbufspace().
373 */
374static int runningbufreq;
375
376/*
377 * Synchronization for bwillwrite() waiters.
378 */
379static int bdirtywait;
380
381/*
382 * Definitions for the buffer free lists.
383 */
384#define QUEUE_NONE	0	/* on no queue */
385#define QUEUE_EMPTY	1	/* empty buffer headers */
386#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
387#define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
388#define QUEUE_SENTINEL	4	/* not an queue index, but mark for sentinel */
389
390/* Maximum number of buffer domains. */
391#define	BUF_DOMAINS	8
392
393struct bufdomainset bdlodirty;		/* Domains > lodirty */
394struct bufdomainset bdhidirty;		/* Domains > hidirty */
395
396/* Configured number of clean queues. */
397static int __read_mostly buf_domains;
398
399BITSET_DEFINE(bufdomainset, BUF_DOMAINS);
400struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
401struct bufqueue __exclusive_cache_line bqempty;
402
403/*
404 * per-cpu empty buffer cache.
405 */
406uma_zone_t buf_zone;
407
408static int
409sysctl_runningspace(SYSCTL_HANDLER_ARGS)
410{
411	long value;
412	int error;
413
414	value = *(long *)arg1;
415	error = sysctl_handle_long(oidp, &value, 0, req);
416	if (error != 0 || req->newptr == NULL)
417		return (error);
418	mtx_lock(&rbreqlock);
419	if (arg1 == &hirunningspace) {
420		if (value < lorunningspace)
421			error = EINVAL;
422		else
423			hirunningspace = value;
424	} else {
425		KASSERT(arg1 == &lorunningspace,
426		    ("%s: unknown arg1", __func__));
427		if (value > hirunningspace)
428			error = EINVAL;
429		else
430			lorunningspace = value;
431	}
432	mtx_unlock(&rbreqlock);
433	return (error);
434}
435
436static int
437sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
438{
439	int error;
440	int value;
441	int i;
442
443	value = *(int *)arg1;
444	error = sysctl_handle_int(oidp, &value, 0, req);
445	if (error != 0 || req->newptr == NULL)
446		return (error);
447	*(int *)arg1 = value;
448	for (i = 0; i < buf_domains; i++)
449		*(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
450		    value / buf_domains;
451
452	return (error);
453}
454
455static int
456sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
457{
458	long value;
459	int error;
460	int i;
461
462	value = *(long *)arg1;
463	error = sysctl_handle_long(oidp, &value, 0, req);
464	if (error != 0 || req->newptr == NULL)
465		return (error);
466	*(long *)arg1 = value;
467	for (i = 0; i < buf_domains; i++)
468		*(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
469		    value / buf_domains;
470
471	return (error);
472}
473
474#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
475    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
476static int
477sysctl_bufspace(SYSCTL_HANDLER_ARGS)
478{
479	long lvalue;
480	int ivalue;
481	int i;
482
483	lvalue = 0;
484	for (i = 0; i < buf_domains; i++)
485		lvalue += bdomain[i].bd_bufspace;
486	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
487		return (sysctl_handle_long(oidp, &lvalue, 0, req));
488	if (lvalue > INT_MAX)
489		/* On overflow, still write out a long to trigger ENOMEM. */
490		return (sysctl_handle_long(oidp, &lvalue, 0, req));
491	ivalue = lvalue;
492	return (sysctl_handle_int(oidp, &ivalue, 0, req));
493}
494#else
495static int
496sysctl_bufspace(SYSCTL_HANDLER_ARGS)
497{
498	long lvalue;
499	int i;
500
501	lvalue = 0;
502	for (i = 0; i < buf_domains; i++)
503		lvalue += bdomain[i].bd_bufspace;
504	return (sysctl_handle_long(oidp, &lvalue, 0, req));
505}
506#endif
507
508static int
509sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
510{
511	int value;
512	int i;
513
514	value = 0;
515	for (i = 0; i < buf_domains; i++)
516		value += bdomain[i].bd_numdirtybuffers;
517	return (sysctl_handle_int(oidp, &value, 0, req));
518}
519
520/*
521 *	bdirtywakeup:
522 *
523 *	Wakeup any bwillwrite() waiters.
524 */
525static void
526bdirtywakeup(void)
527{
528	mtx_lock(&bdirtylock);
529	if (bdirtywait) {
530		bdirtywait = 0;
531		wakeup(&bdirtywait);
532	}
533	mtx_unlock(&bdirtylock);
534}
535
536/*
537 *	bd_clear:
538 *
539 *	Clear a domain from the appropriate bitsets when dirtybuffers
540 *	is decremented.
541 */
542static void
543bd_clear(struct bufdomain *bd)
544{
545
546	mtx_lock(&bdirtylock);
547	if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
548		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
549	if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
550		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
551	mtx_unlock(&bdirtylock);
552}
553
554/*
555 *	bd_set:
556 *
557 *	Set a domain in the appropriate bitsets when dirtybuffers
558 *	is incremented.
559 */
560static void
561bd_set(struct bufdomain *bd)
562{
563
564	mtx_lock(&bdirtylock);
565	if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
566		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
567	if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
568		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
569	mtx_unlock(&bdirtylock);
570}
571
572/*
573 *	bdirtysub:
574 *
575 *	Decrement the numdirtybuffers count by one and wakeup any
576 *	threads blocked in bwillwrite().
577 */
578static void
579bdirtysub(struct buf *bp)
580{
581	struct bufdomain *bd;
582	int num;
583
584	bd = bufdomain(bp);
585	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
586	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
587		bdirtywakeup();
588	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
589		bd_clear(bd);
590}
591
592/*
593 *	bdirtyadd:
594 *
595 *	Increment the numdirtybuffers count by one and wakeup the buf
596 *	daemon if needed.
597 */
598static void
599bdirtyadd(struct buf *bp)
600{
601	struct bufdomain *bd;
602	int num;
603
604	/*
605	 * Only do the wakeup once as we cross the boundary.  The
606	 * buf daemon will keep running until the condition clears.
607	 */
608	bd = bufdomain(bp);
609	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
610	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
611		bd_wakeup();
612	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
613		bd_set(bd);
614}
615
616/*
617 *	bufspace_daemon_wakeup:
618 *
619 *	Wakeup the daemons responsible for freeing clean bufs.
620 */
621static void
622bufspace_daemon_wakeup(struct bufdomain *bd)
623{
624
625	/*
626	 * avoid the lock if the daemon is running.
627	 */
628	if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
629		BD_RUN_LOCK(bd);
630		atomic_store_int(&bd->bd_running, 1);
631		wakeup(&bd->bd_running);
632		BD_RUN_UNLOCK(bd);
633	}
634}
635
636/*
637 *	bufspace_adjust:
638 *
639 *	Adjust the reported bufspace for a KVA managed buffer, possibly
640 * 	waking any waiters.
641 */
642static void
643bufspace_adjust(struct buf *bp, int bufsize)
644{
645	struct bufdomain *bd;
646	long space;
647	int diff;
648
649	KASSERT((bp->b_flags & B_MALLOC) == 0,
650	    ("bufspace_adjust: malloc buf %p", bp));
651	bd = bufdomain(bp);
652	diff = bufsize - bp->b_bufsize;
653	if (diff < 0) {
654		atomic_subtract_long(&bd->bd_bufspace, -diff);
655	} else if (diff > 0) {
656		space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
657		/* Wake up the daemon on the transition. */
658		if (space < bd->bd_bufspacethresh &&
659		    space + diff >= bd->bd_bufspacethresh)
660			bufspace_daemon_wakeup(bd);
661	}
662	bp->b_bufsize = bufsize;
663}
664
665/*
666 *	bufspace_reserve:
667 *
668 *	Reserve bufspace before calling allocbuf().  metadata has a
669 *	different space limit than data.
670 */
671static int
672bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
673{
674	long limit, new;
675	long space;
676
677	if (metadata)
678		limit = bd->bd_maxbufspace;
679	else
680		limit = bd->bd_hibufspace;
681	space = atomic_fetchadd_long(&bd->bd_bufspace, size);
682	new = space + size;
683	if (new > limit) {
684		atomic_subtract_long(&bd->bd_bufspace, size);
685		return (ENOSPC);
686	}
687
688	/* Wake up the daemon on the transition. */
689	if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
690		bufspace_daemon_wakeup(bd);
691
692	return (0);
693}
694
695/*
696 *	bufspace_release:
697 *
698 *	Release reserved bufspace after bufspace_adjust() has consumed it.
699 */
700static void
701bufspace_release(struct bufdomain *bd, int size)
702{
703
704	atomic_subtract_long(&bd->bd_bufspace, size);
705}
706
707/*
708 *	bufspace_wait:
709 *
710 *	Wait for bufspace, acting as the buf daemon if a locked vnode is
711 *	supplied.  bd_wanted must be set prior to polling for space.  The
712 *	operation must be re-tried on return.
713 */
714static void
715bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
716    int slpflag, int slptimeo)
717{
718	struct thread *td;
719	int error, fl, norunbuf;
720
721	if ((gbflags & GB_NOWAIT_BD) != 0)
722		return;
723
724	td = curthread;
725	BD_LOCK(bd);
726	while (bd->bd_wanted) {
727		if (vp != NULL && vp->v_type != VCHR &&
728		    (td->td_pflags & TDP_BUFNEED) == 0) {
729			BD_UNLOCK(bd);
730			/*
731			 * getblk() is called with a vnode locked, and
732			 * some majority of the dirty buffers may as
733			 * well belong to the vnode.  Flushing the
734			 * buffers there would make a progress that
735			 * cannot be achieved by the buf_daemon, that
736			 * cannot lock the vnode.
737			 */
738			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
739			    (td->td_pflags & TDP_NORUNNINGBUF);
740
741			/*
742			 * Play bufdaemon.  The getnewbuf() function
743			 * may be called while the thread owns lock
744			 * for another dirty buffer for the same
745			 * vnode, which makes it impossible to use
746			 * VOP_FSYNC() there, due to the buffer lock
747			 * recursion.
748			 */
749			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
750			fl = buf_flush(vp, bd, flushbufqtarget);
751			td->td_pflags &= norunbuf;
752			BD_LOCK(bd);
753			if (fl != 0)
754				continue;
755			if (bd->bd_wanted == 0)
756				break;
757		}
758		error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
759		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
760		if (error != 0)
761			break;
762	}
763	BD_UNLOCK(bd);
764}
765
766static void
767bufspace_daemon_shutdown(void *arg, int howto __unused)
768{
769	struct bufdomain *bd = arg;
770	int error;
771
772	if (KERNEL_PANICKED())
773		return;
774
775	BD_RUN_LOCK(bd);
776	bd->bd_shutdown = true;
777	wakeup(&bd->bd_running);
778	error = msleep(&bd->bd_shutdown, BD_RUN_LOCKPTR(bd), 0,
779	    "bufspace_shutdown", 60 * hz);
780	BD_RUN_UNLOCK(bd);
781	if (error != 0)
782		printf("bufspacedaemon wait error: %d\n", error);
783}
784
785/*
786 *	bufspace_daemon:
787 *
788 *	buffer space management daemon.  Tries to maintain some marginal
789 *	amount of free buffer space so that requesting processes neither
790 *	block nor work to reclaim buffers.
791 */
792static void
793bufspace_daemon(void *arg)
794{
795	struct bufdomain *bd = arg;
796
797	EVENTHANDLER_REGISTER(shutdown_pre_sync, bufspace_daemon_shutdown, bd,
798	    SHUTDOWN_PRI_LAST + 100);
799
800	BD_RUN_LOCK(bd);
801	while (!bd->bd_shutdown) {
802		BD_RUN_UNLOCK(bd);
803
804		/*
805		 * Free buffers from the clean queue until we meet our
806		 * targets.
807		 *
808		 * Theory of operation:  The buffer cache is most efficient
809		 * when some free buffer headers and space are always
810		 * available to getnewbuf().  This daemon attempts to prevent
811		 * the excessive blocking and synchronization associated
812		 * with shortfall.  It goes through three phases according
813		 * demand:
814		 *
815		 * 1)	The daemon wakes up voluntarily once per-second
816		 *	during idle periods when the counters are below
817		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
818		 *
819		 * 2)	The daemon wakes up as we cross the thresholds
820		 *	ahead of any potential blocking.  This may bounce
821		 *	slightly according to the rate of consumption and
822		 *	release.
823		 *
824		 * 3)	The daemon and consumers are starved for working
825		 *	clean buffers.  This is the 'bufspace' sleep below
826		 *	which will inefficiently trade bufs with bqrelse
827		 *	until we return to condition 2.
828		 */
829		while (bd->bd_bufspace > bd->bd_lobufspace ||
830		    bd->bd_freebuffers < bd->bd_hifreebuffers) {
831			if (buf_recycle(bd, false) != 0) {
832				if (bd_flushall(bd))
833					continue;
834				/*
835				 * Speedup dirty if we've run out of clean
836				 * buffers.  This is possible in particular
837				 * because softdep may held many bufs locked
838				 * pending writes to other bufs which are
839				 * marked for delayed write, exhausting
840				 * clean space until they are written.
841				 */
842				bd_speedup();
843				BD_LOCK(bd);
844				if (bd->bd_wanted) {
845					msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
846					    PRIBIO|PDROP, "bufspace", hz/10);
847				} else
848					BD_UNLOCK(bd);
849			}
850			maybe_yield();
851		}
852
853		/*
854		 * Re-check our limits and sleep.  bd_running must be
855		 * cleared prior to checking the limits to avoid missed
856		 * wakeups.  The waker will adjust one of bufspace or
857		 * freebuffers prior to checking bd_running.
858		 */
859		BD_RUN_LOCK(bd);
860		if (bd->bd_shutdown)
861			break;
862		atomic_store_int(&bd->bd_running, 0);
863		if (bd->bd_bufspace < bd->bd_bufspacethresh &&
864		    bd->bd_freebuffers > bd->bd_lofreebuffers) {
865			msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd),
866			    PRIBIO, "-", hz);
867		} else {
868			/* Avoid spurious wakeups while running. */
869			atomic_store_int(&bd->bd_running, 1);
870		}
871	}
872	wakeup(&bd->bd_shutdown);
873	BD_RUN_UNLOCK(bd);
874	kthread_exit();
875}
876
877/*
878 *	bufmallocadjust:
879 *
880 *	Adjust the reported bufspace for a malloc managed buffer, possibly
881 *	waking any waiters.
882 */
883static void
884bufmallocadjust(struct buf *bp, int bufsize)
885{
886	int diff;
887
888	KASSERT((bp->b_flags & B_MALLOC) != 0,
889	    ("bufmallocadjust: non-malloc buf %p", bp));
890	diff = bufsize - bp->b_bufsize;
891	if (diff < 0)
892		atomic_subtract_long(&bufmallocspace, -diff);
893	else
894		atomic_add_long(&bufmallocspace, diff);
895	bp->b_bufsize = bufsize;
896}
897
898/*
899 *	runningwakeup:
900 *
901 *	Wake up processes that are waiting on asynchronous writes to fall
902 *	below lorunningspace.
903 */
904static void
905runningwakeup(void)
906{
907
908	mtx_lock(&rbreqlock);
909	if (runningbufreq) {
910		runningbufreq = 0;
911		wakeup(&runningbufreq);
912	}
913	mtx_unlock(&rbreqlock);
914}
915
916/*
917 *	runningbufwakeup:
918 *
919 *	Decrement the outstanding write count according.
920 */
921void
922runningbufwakeup(struct buf *bp)
923{
924	long space, bspace;
925
926	bspace = bp->b_runningbufspace;
927	if (bspace == 0)
928		return;
929	space = atomic_fetchadd_long(&runningbufspace, -bspace);
930	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
931	    space, bspace));
932	bp->b_runningbufspace = 0;
933	/*
934	 * Only acquire the lock and wakeup on the transition from exceeding
935	 * the threshold to falling below it.
936	 */
937	if (space < lorunningspace)
938		return;
939	if (space - bspace > lorunningspace)
940		return;
941	runningwakeup();
942}
943
944/*
945 *	waitrunningbufspace()
946 *
947 *	runningbufspace is a measure of the amount of I/O currently
948 *	running.  This routine is used in async-write situations to
949 *	prevent creating huge backups of pending writes to a device.
950 *	Only asynchronous writes are governed by this function.
951 *
952 *	This does NOT turn an async write into a sync write.  It waits
953 *	for earlier writes to complete and generally returns before the
954 *	caller's write has reached the device.
955 */
956void
957waitrunningbufspace(void)
958{
959
960	mtx_lock(&rbreqlock);
961	while (runningbufspace > hirunningspace) {
962		runningbufreq = 1;
963		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
964	}
965	mtx_unlock(&rbreqlock);
966}
967
968/*
969 *	vfs_buf_test_cache:
970 *
971 *	Called when a buffer is extended.  This function clears the B_CACHE
972 *	bit if the newly extended portion of the buffer does not contain
973 *	valid data.
974 */
975static __inline void
976vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
977    vm_offset_t size, vm_page_t m)
978{
979
980	/*
981	 * This function and its results are protected by higher level
982	 * synchronization requiring vnode and buf locks to page in and
983	 * validate pages.
984	 */
985	if (bp->b_flags & B_CACHE) {
986		int base = (foff + off) & PAGE_MASK;
987		if (vm_page_is_valid(m, base, size) == 0)
988			bp->b_flags &= ~B_CACHE;
989	}
990}
991
992/* Wake up the buffer daemon if necessary */
993static void
994bd_wakeup(void)
995{
996
997	mtx_lock(&bdlock);
998	if (bd_request == 0) {
999		bd_request = 1;
1000		wakeup(&bd_request);
1001	}
1002	mtx_unlock(&bdlock);
1003}
1004
1005/*
1006 * Adjust the maxbcachbuf tunable.
1007 */
1008static void
1009maxbcachebuf_adjust(void)
1010{
1011	int i;
1012
1013	/*
1014	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
1015	 */
1016	i = 2;
1017	while (i * 2 <= maxbcachebuf)
1018		i *= 2;
1019	maxbcachebuf = i;
1020	if (maxbcachebuf < MAXBSIZE)
1021		maxbcachebuf = MAXBSIZE;
1022	if (maxbcachebuf > maxphys)
1023		maxbcachebuf = maxphys;
1024	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
1025		printf("maxbcachebuf=%d\n", maxbcachebuf);
1026}
1027
1028/*
1029 * bd_speedup - speedup the buffer cache flushing code
1030 */
1031void
1032bd_speedup(void)
1033{
1034	int needwake;
1035
1036	mtx_lock(&bdlock);
1037	needwake = 0;
1038	if (bd_speedupreq == 0 || bd_request == 0)
1039		needwake = 1;
1040	bd_speedupreq = 1;
1041	bd_request = 1;
1042	if (needwake)
1043		wakeup(&bd_request);
1044	mtx_unlock(&bdlock);
1045}
1046
1047#ifdef __i386__
1048#define	TRANSIENT_DENOM	5
1049#else
1050#define	TRANSIENT_DENOM 10
1051#endif
1052
1053/*
1054 * Calculating buffer cache scaling values and reserve space for buffer
1055 * headers.  This is called during low level kernel initialization and
1056 * may be called more then once.  We CANNOT write to the memory area
1057 * being reserved at this time.
1058 */
1059caddr_t
1060kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
1061{
1062	int tuned_nbuf;
1063	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
1064
1065	/*
1066	 * With KASAN or KMSAN enabled, the kernel map is shadowed.  Account for
1067	 * this when sizing maps based on the amount of physical memory
1068	 * available.
1069	 */
1070#if defined(KASAN)
1071	physmem_est = (physmem_est * KASAN_SHADOW_SCALE) /
1072	    (KASAN_SHADOW_SCALE + 1);
1073#elif defined(KMSAN)
1074	physmem_est /= 3;
1075
1076	/*
1077	 * KMSAN cannot reliably determine whether buffer data is initialized
1078	 * unless it is updated through a KVA mapping.
1079	 */
1080	unmapped_buf_allowed = 0;
1081#endif
1082
1083	/*
1084	 * physmem_est is in pages.  Convert it to kilobytes (assumes
1085	 * PAGE_SIZE is >= 1K)
1086	 */
1087	physmem_est = physmem_est * (PAGE_SIZE / 1024);
1088
1089	maxbcachebuf_adjust();
1090	/*
1091	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
1092	 * For the first 64MB of ram nominally allocate sufficient buffers to
1093	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
1094	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
1095	 * the buffer cache we limit the eventual kva reservation to
1096	 * maxbcache bytes.
1097	 *
1098	 * factor represents the 1/4 x ram conversion.
1099	 */
1100	if (nbuf == 0) {
1101		int factor = 4 * BKVASIZE / 1024;
1102
1103		nbuf = 50;
1104		if (physmem_est > 4096)
1105			nbuf += min((physmem_est - 4096) / factor,
1106			    65536 / factor);
1107		if (physmem_est > 65536)
1108			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
1109			    32 * 1024 * 1024 / (factor * 5));
1110
1111		if (maxbcache && nbuf > maxbcache / BKVASIZE)
1112			nbuf = maxbcache / BKVASIZE;
1113		tuned_nbuf = 1;
1114	} else
1115		tuned_nbuf = 0;
1116
1117	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
1118	maxbuf = (LONG_MAX / 3) / BKVASIZE;
1119	if (nbuf > maxbuf) {
1120		if (!tuned_nbuf)
1121			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
1122			    maxbuf);
1123		nbuf = maxbuf;
1124	}
1125
1126	/*
1127	 * Ideal allocation size for the transient bio submap is 10%
1128	 * of the maximal space buffer map.  This roughly corresponds
1129	 * to the amount of the buffer mapped for typical UFS load.
1130	 *
1131	 * Clip the buffer map to reserve space for the transient
1132	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
1133	 * maximum buffer map extent on the platform.
1134	 *
1135	 * The fall-back to the maxbuf in case of maxbcache unset,
1136	 * allows to not trim the buffer KVA for the architectures
1137	 * with ample KVA space.
1138	 */
1139	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
1140		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
1141		buf_sz = (long)nbuf * BKVASIZE;
1142		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
1143		    (TRANSIENT_DENOM - 1)) {
1144			/*
1145			 * There is more KVA than memory.  Do not
1146			 * adjust buffer map size, and assign the rest
1147			 * of maxbuf to transient map.
1148			 */
1149			biotmap_sz = maxbuf_sz - buf_sz;
1150		} else {
1151			/*
1152			 * Buffer map spans all KVA we could afford on
1153			 * this platform.  Give 10% (20% on i386) of
1154			 * the buffer map to the transient bio map.
1155			 */
1156			biotmap_sz = buf_sz / TRANSIENT_DENOM;
1157			buf_sz -= biotmap_sz;
1158		}
1159		if (biotmap_sz / INT_MAX > maxphys)
1160			bio_transient_maxcnt = INT_MAX;
1161		else
1162			bio_transient_maxcnt = biotmap_sz / maxphys;
1163		/*
1164		 * Artificially limit to 1024 simultaneous in-flight I/Os
1165		 * using the transient mapping.
1166		 */
1167		if (bio_transient_maxcnt > 1024)
1168			bio_transient_maxcnt = 1024;
1169		if (tuned_nbuf)
1170			nbuf = buf_sz / BKVASIZE;
1171	}
1172
1173	if (nswbuf == 0) {
1174		/*
1175		 * Pager buffers are allocated for short periods, so scale the
1176		 * number of reserved buffers based on the number of CPUs rather
1177		 * than amount of memory.
1178		 */
1179		nswbuf = min(nbuf / 4, 32 * mp_ncpus);
1180		if (nswbuf < NSWBUF_MIN)
1181			nswbuf = NSWBUF_MIN;
1182	}
1183
1184	/*
1185	 * Reserve space for the buffer cache buffers
1186	 */
1187	buf = (char *)v;
1188	v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
1189	    atop(maxbcachebuf)) * nbuf;
1190
1191	return (v);
1192}
1193
1194/*
1195 * Single global constant for BUF_WMESG, to avoid getting multiple
1196 * references.
1197 */
1198static const char buf_wmesg[] = "bufwait";
1199
1200/* Initialize the buffer subsystem.  Called before use of any buffers. */
1201void
1202bufinit(void)
1203{
1204	struct buf *bp;
1205	int i;
1206
1207	TSENTER();
1208	KASSERT(maxbcachebuf >= MAXBSIZE,
1209	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1210	    MAXBSIZE));
1211	bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1212	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1213	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1214	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1215
1216	unmapped_buf = (caddr_t)kva_alloc(maxphys);
1217#ifdef INVARIANTS
1218	poisoned_buf = unmapped_buf;
1219#endif
1220
1221	/* finally, initialize each buffer header and stick on empty q */
1222	for (i = 0; i < nbuf; i++) {
1223		bp = nbufp(i);
1224		bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
1225		bp->b_flags = B_INVAL;
1226		bp->b_rcred = NOCRED;
1227		bp->b_wcred = NOCRED;
1228		bp->b_qindex = QUEUE_NONE;
1229		bp->b_domain = -1;
1230		bp->b_subqueue = mp_maxid + 1;
1231		bp->b_xflags = 0;
1232		bp->b_data = bp->b_kvabase = unmapped_buf;
1233		LIST_INIT(&bp->b_dep);
1234		BUF_LOCKINIT(bp, buf_wmesg);
1235		bq_insert(&bqempty, bp, false);
1236	}
1237
1238	/*
1239	 * maxbufspace is the absolute maximum amount of buffer space we are
1240	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1241	 * is nominally used by metadata.  hibufspace is the nominal maximum
1242	 * used by most other requests.  The differential is required to
1243	 * ensure that metadata deadlocks don't occur.
1244	 *
1245	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
1246	 * this may result in KVM fragmentation which is not handled optimally
1247	 * by the system. XXX This is less true with vmem.  We could use
1248	 * PAGE_SIZE.
1249	 */
1250	maxbufspace = (long)nbuf * BKVASIZE;
1251	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1252	lobufspace = (hibufspace / 20) * 19; /* 95% */
1253	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1254
1255	/*
1256	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1257	 * arbitrarily and may need further tuning. It corresponds to
1258	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1259	 * which fits with many RAID controllers' tagged queuing limits.
1260	 * The lower 1 MiB limit is the historical upper limit for
1261	 * hirunningspace.
1262	 */
1263	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1264	    16 * 1024 * 1024), 1024 * 1024);
1265	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1266
1267	/*
1268	 * Limit the amount of malloc memory since it is wired permanently into
1269	 * the kernel space.  Even though this is accounted for in the buffer
1270	 * allocation, we don't want the malloced region to grow uncontrolled.
1271	 * The malloc scheme improves memory utilization significantly on
1272	 * average (small) directories.
1273	 */
1274	maxbufmallocspace = hibufspace / 20;
1275
1276	/*
1277	 * Reduce the chance of a deadlock occurring by limiting the number
1278	 * of delayed-write dirty buffers we allow to stack up.
1279	 */
1280	hidirtybuffers = nbuf / 4 + 20;
1281	dirtybufthresh = hidirtybuffers * 9 / 10;
1282	/*
1283	 * To support extreme low-memory systems, make sure hidirtybuffers
1284	 * cannot eat up all available buffer space.  This occurs when our
1285	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1286	 * buffer space assuming BKVASIZE'd buffers.
1287	 */
1288	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1289		hidirtybuffers >>= 1;
1290	}
1291	lodirtybuffers = hidirtybuffers / 2;
1292
1293	/*
1294	 * lofreebuffers should be sufficient to avoid stalling waiting on
1295	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1296	 * are counted as free but will be unavailable to threads executing
1297	 * on other cpus.
1298	 *
1299	 * hifreebuffers is the free target for the bufspace daemon.  This
1300	 * should be set appropriately to limit work per-iteration.
1301	 */
1302	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1303	hifreebuffers = (3 * lofreebuffers) / 2;
1304	numfreebuffers = nbuf;
1305
1306	/* Setup the kva and free list allocators. */
1307	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1308	buf_zone = uma_zcache_create("buf free cache",
1309	    sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
1310	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1311
1312	/*
1313	 * Size the clean queue according to the amount of buffer space.
1314	 * One queue per-256mb up to the max.  More queues gives better
1315	 * concurrency but less accurate LRU.
1316	 */
1317	buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
1318	for (i = 0 ; i < buf_domains; i++) {
1319		struct bufdomain *bd;
1320
1321		bd = &bdomain[i];
1322		bd_init(bd);
1323		bd->bd_freebuffers = nbuf / buf_domains;
1324		bd->bd_hifreebuffers = hifreebuffers / buf_domains;
1325		bd->bd_lofreebuffers = lofreebuffers / buf_domains;
1326		bd->bd_bufspace = 0;
1327		bd->bd_maxbufspace = maxbufspace / buf_domains;
1328		bd->bd_hibufspace = hibufspace / buf_domains;
1329		bd->bd_lobufspace = lobufspace / buf_domains;
1330		bd->bd_bufspacethresh = bufspacethresh / buf_domains;
1331		bd->bd_numdirtybuffers = 0;
1332		bd->bd_hidirtybuffers = hidirtybuffers / buf_domains;
1333		bd->bd_lodirtybuffers = lodirtybuffers / buf_domains;
1334		bd->bd_dirtybufthresh = dirtybufthresh / buf_domains;
1335		/* Don't allow more than 2% of bufs in the per-cpu caches. */
1336		bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
1337	}
1338	getnewbufcalls = counter_u64_alloc(M_WAITOK);
1339	getnewbufrestarts = counter_u64_alloc(M_WAITOK);
1340	mappingrestarts = counter_u64_alloc(M_WAITOK);
1341	numbufallocfails = counter_u64_alloc(M_WAITOK);
1342	notbufdflushes = counter_u64_alloc(M_WAITOK);
1343	buffreekvacnt = counter_u64_alloc(M_WAITOK);
1344	bufdefragcnt = counter_u64_alloc(M_WAITOK);
1345	bufkvaspace = counter_u64_alloc(M_WAITOK);
1346	TSEXIT();
1347}
1348
1349#ifdef INVARIANTS
1350static inline void
1351vfs_buf_check_mapped(struct buf *bp)
1352{
1353
1354	KASSERT(bp->b_kvabase != unmapped_buf,
1355	    ("mapped buf: b_kvabase was not updated %p", bp));
1356	KASSERT(bp->b_data != unmapped_buf,
1357	    ("mapped buf: b_data was not updated %p", bp));
1358	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1359	    maxphys, ("b_data + b_offset unmapped %p", bp));
1360}
1361
1362static inline void
1363vfs_buf_check_unmapped(struct buf *bp)
1364{
1365
1366	KASSERT(bp->b_data == unmapped_buf,
1367	    ("unmapped buf: corrupted b_data %p", bp));
1368}
1369
1370#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1371#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1372#else
1373#define	BUF_CHECK_MAPPED(bp) do {} while (0)
1374#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1375#endif
1376
1377static int
1378isbufbusy(struct buf *bp)
1379{
1380	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1381	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1382		return (1);
1383	return (0);
1384}
1385
1386/*
1387 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1388 */
1389void
1390bufshutdown(int show_busybufs)
1391{
1392	static int first_buf_printf = 1;
1393	struct buf *bp;
1394	int i, iter, nbusy, pbusy;
1395#ifndef PREEMPTION
1396	int subiter;
1397#endif
1398
1399	/*
1400	 * Sync filesystems for shutdown
1401	 */
1402	wdog_kern_pat(WD_LASTVAL);
1403	kern_sync(curthread);
1404
1405	/*
1406	 * With soft updates, some buffers that are
1407	 * written will be remarked as dirty until other
1408	 * buffers are written.
1409	 */
1410	for (iter = pbusy = 0; iter < 20; iter++) {
1411		nbusy = 0;
1412		for (i = nbuf - 1; i >= 0; i--) {
1413			bp = nbufp(i);
1414			if (isbufbusy(bp))
1415				nbusy++;
1416		}
1417		if (nbusy == 0) {
1418			if (first_buf_printf)
1419				printf("All buffers synced.");
1420			break;
1421		}
1422		if (first_buf_printf) {
1423			printf("Syncing disks, buffers remaining... ");
1424			first_buf_printf = 0;
1425		}
1426		printf("%d ", nbusy);
1427		if (nbusy < pbusy)
1428			iter = 0;
1429		pbusy = nbusy;
1430
1431		wdog_kern_pat(WD_LASTVAL);
1432		kern_sync(curthread);
1433
1434#ifdef PREEMPTION
1435		/*
1436		 * Spin for a while to allow interrupt threads to run.
1437		 */
1438		DELAY(50000 * iter);
1439#else
1440		/*
1441		 * Context switch several times to allow interrupt
1442		 * threads to run.
1443		 */
1444		for (subiter = 0; subiter < 50 * iter; subiter++) {
1445			sched_relinquish(curthread);
1446			DELAY(1000);
1447		}
1448#endif
1449	}
1450	printf("\n");
1451	/*
1452	 * Count only busy local buffers to prevent forcing
1453	 * a fsck if we're just a client of a wedged NFS server
1454	 */
1455	nbusy = 0;
1456	for (i = nbuf - 1; i >= 0; i--) {
1457		bp = nbufp(i);
1458		if (isbufbusy(bp)) {
1459#if 0
1460/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1461			if (bp->b_dev == NULL) {
1462				TAILQ_REMOVE(&mountlist,
1463				    bp->b_vp->v_mount, mnt_list);
1464				continue;
1465			}
1466#endif
1467			nbusy++;
1468			if (show_busybufs > 0) {
1469				printf(
1470	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1471				    nbusy, bp, bp->b_vp, bp->b_flags,
1472				    (intmax_t)bp->b_blkno,
1473				    (intmax_t)bp->b_lblkno);
1474				BUF_LOCKPRINTINFO(bp);
1475				if (show_busybufs > 1)
1476					vn_printf(bp->b_vp,
1477					    "vnode content: ");
1478			}
1479		}
1480	}
1481	if (nbusy) {
1482		/*
1483		 * Failed to sync all blocks. Indicate this and don't
1484		 * unmount filesystems (thus forcing an fsck on reboot).
1485		 */
1486		BOOTTRACE("shutdown failed to sync buffers");
1487		printf("Giving up on %d buffers\n", nbusy);
1488		DELAY(5000000);	/* 5 seconds */
1489		swapoff_all();
1490	} else {
1491		BOOTTRACE("shutdown sync complete");
1492		if (!first_buf_printf)
1493			printf("Final sync complete\n");
1494
1495		/*
1496		 * Unmount filesystems and perform swapoff, to quiesce
1497		 * the system as much as possible.  In particular, no
1498		 * I/O should be initiated from top levels since it
1499		 * might be abruptly terminated by reset, or otherwise
1500		 * erronously handled because other parts of the
1501		 * system are disabled.
1502		 *
1503		 * Swapoff before unmount, because file-backed swap is
1504		 * non-operational after unmount of the underlying
1505		 * filesystem.
1506		 */
1507		if (!KERNEL_PANICKED()) {
1508			swapoff_all();
1509			vfs_unmountall();
1510		}
1511		BOOTTRACE("shutdown unmounted all filesystems");
1512	}
1513	DELAY(100000);		/* wait for console output to finish */
1514}
1515
1516static void
1517bpmap_qenter(struct buf *bp)
1518{
1519
1520	BUF_CHECK_MAPPED(bp);
1521
1522	/*
1523	 * bp->b_data is relative to bp->b_offset, but
1524	 * bp->b_offset may be offset into the first page.
1525	 */
1526	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1527	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1528	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1529	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1530}
1531
1532static inline struct bufdomain *
1533bufdomain(struct buf *bp)
1534{
1535
1536	return (&bdomain[bp->b_domain]);
1537}
1538
1539static struct bufqueue *
1540bufqueue(struct buf *bp)
1541{
1542
1543	switch (bp->b_qindex) {
1544	case QUEUE_NONE:
1545		/* FALLTHROUGH */
1546	case QUEUE_SENTINEL:
1547		return (NULL);
1548	case QUEUE_EMPTY:
1549		return (&bqempty);
1550	case QUEUE_DIRTY:
1551		return (&bufdomain(bp)->bd_dirtyq);
1552	case QUEUE_CLEAN:
1553		return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
1554	default:
1555		break;
1556	}
1557	panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1558}
1559
1560/*
1561 * Return the locked bufqueue that bp is a member of.
1562 */
1563static struct bufqueue *
1564bufqueue_acquire(struct buf *bp)
1565{
1566	struct bufqueue *bq, *nbq;
1567
1568	/*
1569	 * bp can be pushed from a per-cpu queue to the
1570	 * cleanq while we're waiting on the lock.  Retry
1571	 * if the queues don't match.
1572	 */
1573	bq = bufqueue(bp);
1574	BQ_LOCK(bq);
1575	for (;;) {
1576		nbq = bufqueue(bp);
1577		if (bq == nbq)
1578			break;
1579		BQ_UNLOCK(bq);
1580		BQ_LOCK(nbq);
1581		bq = nbq;
1582	}
1583	return (bq);
1584}
1585
1586/*
1587 *	binsfree:
1588 *
1589 *	Insert the buffer into the appropriate free list.  Requires a
1590 *	locked buffer on entry and buffer is unlocked before return.
1591 */
1592static void
1593binsfree(struct buf *bp, int qindex)
1594{
1595	struct bufdomain *bd;
1596	struct bufqueue *bq;
1597
1598	KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1599	    ("binsfree: Invalid qindex %d", qindex));
1600	BUF_ASSERT_XLOCKED(bp);
1601
1602	/*
1603	 * Handle delayed bremfree() processing.
1604	 */
1605	if (bp->b_flags & B_REMFREE) {
1606		if (bp->b_qindex == qindex) {
1607			bp->b_flags |= B_REUSE;
1608			bp->b_flags &= ~B_REMFREE;
1609			BUF_UNLOCK(bp);
1610			return;
1611		}
1612		bq = bufqueue_acquire(bp);
1613		bq_remove(bq, bp);
1614		BQ_UNLOCK(bq);
1615	}
1616	bd = bufdomain(bp);
1617	if (qindex == QUEUE_CLEAN) {
1618		if (bd->bd_lim != 0)
1619			bq = &bd->bd_subq[PCPU_GET(cpuid)];
1620		else
1621			bq = bd->bd_cleanq;
1622	} else
1623		bq = &bd->bd_dirtyq;
1624	bq_insert(bq, bp, true);
1625}
1626
1627/*
1628 * buf_free:
1629 *
1630 *	Free a buffer to the buf zone once it no longer has valid contents.
1631 */
1632static void
1633buf_free(struct buf *bp)
1634{
1635
1636	if (bp->b_flags & B_REMFREE)
1637		bremfreef(bp);
1638	if (bp->b_vflags & BV_BKGRDINPROG)
1639		panic("losing buffer 1");
1640	if (bp->b_rcred != NOCRED) {
1641		crfree(bp->b_rcred);
1642		bp->b_rcred = NOCRED;
1643	}
1644	if (bp->b_wcred != NOCRED) {
1645		crfree(bp->b_wcred);
1646		bp->b_wcred = NOCRED;
1647	}
1648	if (!LIST_EMPTY(&bp->b_dep))
1649		buf_deallocate(bp);
1650	bufkva_free(bp);
1651	atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
1652	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1653	BUF_UNLOCK(bp);
1654	uma_zfree(buf_zone, bp);
1655}
1656
1657/*
1658 * buf_import:
1659 *
1660 *	Import bufs into the uma cache from the buf list.  The system still
1661 *	expects a static array of bufs and much of the synchronization
1662 *	around bufs assumes type stable storage.  As a result, UMA is used
1663 *	only as a per-cpu cache of bufs still maintained on a global list.
1664 */
1665static int
1666buf_import(void *arg, void **store, int cnt, int domain, int flags)
1667{
1668	struct buf *bp;
1669	int i;
1670
1671	BQ_LOCK(&bqempty);
1672	for (i = 0; i < cnt; i++) {
1673		bp = TAILQ_FIRST(&bqempty.bq_queue);
1674		if (bp == NULL)
1675			break;
1676		bq_remove(&bqempty, bp);
1677		store[i] = bp;
1678	}
1679	BQ_UNLOCK(&bqempty);
1680
1681	return (i);
1682}
1683
1684/*
1685 * buf_release:
1686 *
1687 *	Release bufs from the uma cache back to the buffer queues.
1688 */
1689static void
1690buf_release(void *arg, void **store, int cnt)
1691{
1692	struct bufqueue *bq;
1693	struct buf *bp;
1694        int i;
1695
1696	bq = &bqempty;
1697	BQ_LOCK(bq);
1698        for (i = 0; i < cnt; i++) {
1699		bp = store[i];
1700		/* Inline bq_insert() to batch locking. */
1701		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1702		bp->b_flags &= ~(B_AGE | B_REUSE);
1703		bq->bq_len++;
1704		bp->b_qindex = bq->bq_index;
1705	}
1706	BQ_UNLOCK(bq);
1707}
1708
1709/*
1710 * buf_alloc:
1711 *
1712 *	Allocate an empty buffer header.
1713 */
1714static struct buf *
1715buf_alloc(struct bufdomain *bd)
1716{
1717	struct buf *bp;
1718	int freebufs, error;
1719
1720	/*
1721	 * We can only run out of bufs in the buf zone if the average buf
1722	 * is less than BKVASIZE.  In this case the actual wait/block will
1723	 * come from buf_reycle() failing to flush one of these small bufs.
1724	 */
1725	bp = NULL;
1726	freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1727	if (freebufs > 0)
1728		bp = uma_zalloc(buf_zone, M_NOWAIT);
1729	if (bp == NULL) {
1730		atomic_add_int(&bd->bd_freebuffers, 1);
1731		bufspace_daemon_wakeup(bd);
1732		counter_u64_add(numbufallocfails, 1);
1733		return (NULL);
1734	}
1735	/*
1736	 * Wake-up the bufspace daemon on transition below threshold.
1737	 */
1738	if (freebufs == bd->bd_lofreebuffers)
1739		bufspace_daemon_wakeup(bd);
1740
1741	error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1742	KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp,
1743	    error));
1744	(void)error;
1745
1746	KASSERT(bp->b_vp == NULL,
1747	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1748	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1749	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1750	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1751	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1752	KASSERT(bp->b_npages == 0,
1753	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1754	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1755	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1756	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1757
1758	bp->b_domain = BD_DOMAIN(bd);
1759	bp->b_flags = 0;
1760	bp->b_ioflags = 0;
1761	bp->b_xflags = 0;
1762	bp->b_vflags = 0;
1763	bp->b_vp = NULL;
1764	bp->b_blkno = bp->b_lblkno = 0;
1765	bp->b_offset = NOOFFSET;
1766	bp->b_iodone = 0;
1767	bp->b_error = 0;
1768	bp->b_resid = 0;
1769	bp->b_bcount = 0;
1770	bp->b_npages = 0;
1771	bp->b_dirtyoff = bp->b_dirtyend = 0;
1772	bp->b_bufobj = NULL;
1773	bp->b_data = bp->b_kvabase = unmapped_buf;
1774	bp->b_fsprivate1 = NULL;
1775	bp->b_fsprivate2 = NULL;
1776	bp->b_fsprivate3 = NULL;
1777	LIST_INIT(&bp->b_dep);
1778
1779	return (bp);
1780}
1781
1782/*
1783 *	buf_recycle:
1784 *
1785 *	Free a buffer from the given bufqueue.  kva controls whether the
1786 *	freed buf must own some kva resources.  This is used for
1787 *	defragmenting.
1788 */
1789static int
1790buf_recycle(struct bufdomain *bd, bool kva)
1791{
1792	struct bufqueue *bq;
1793	struct buf *bp, *nbp;
1794
1795	if (kva)
1796		counter_u64_add(bufdefragcnt, 1);
1797	nbp = NULL;
1798	bq = bd->bd_cleanq;
1799	BQ_LOCK(bq);
1800	KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1801	    ("buf_recycle: Locks don't match"));
1802	nbp = TAILQ_FIRST(&bq->bq_queue);
1803
1804	/*
1805	 * Run scan, possibly freeing data and/or kva mappings on the fly
1806	 * depending on whether kva defragmentation was requested.
1807	 */
1808	while ((bp = nbp) != NULL) {
1809		/*
1810		 * Calculate next bp (we can only use it if we do not
1811		 * release the bqlock).
1812		 */
1813		nbp = TAILQ_NEXT(bp, b_freelist);
1814
1815		/*
1816		 * If we are defragging then we need a buffer with
1817		 * some kva to reclaim.
1818		 */
1819		if (kva && bp->b_kvasize == 0)
1820			continue;
1821
1822		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1823			continue;
1824
1825		/*
1826		 * Implement a second chance algorithm for frequently
1827		 * accessed buffers.
1828		 */
1829		if ((bp->b_flags & B_REUSE) != 0) {
1830			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1831			TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1832			bp->b_flags &= ~B_REUSE;
1833			BUF_UNLOCK(bp);
1834			continue;
1835		}
1836
1837		/*
1838		 * Skip buffers with background writes in progress.
1839		 */
1840		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1841			BUF_UNLOCK(bp);
1842			continue;
1843		}
1844
1845		KASSERT(bp->b_qindex == QUEUE_CLEAN,
1846		    ("buf_recycle: inconsistent queue %d bp %p",
1847		    bp->b_qindex, bp));
1848		KASSERT(bp->b_domain == BD_DOMAIN(bd),
1849		    ("getnewbuf: queue domain %d doesn't match request %d",
1850		    bp->b_domain, (int)BD_DOMAIN(bd)));
1851		/*
1852		 * NOTE:  nbp is now entirely invalid.  We can only restart
1853		 * the scan from this point on.
1854		 */
1855		bq_remove(bq, bp);
1856		BQ_UNLOCK(bq);
1857
1858		/*
1859		 * Requeue the background write buffer with error and
1860		 * restart the scan.
1861		 */
1862		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1863			bqrelse(bp);
1864			BQ_LOCK(bq);
1865			nbp = TAILQ_FIRST(&bq->bq_queue);
1866			continue;
1867		}
1868		bp->b_flags |= B_INVAL;
1869		brelse(bp);
1870		return (0);
1871	}
1872	bd->bd_wanted = 1;
1873	BQ_UNLOCK(bq);
1874
1875	return (ENOBUFS);
1876}
1877
1878/*
1879 *	bremfree:
1880 *
1881 *	Mark the buffer for removal from the appropriate free list.
1882 *
1883 */
1884void
1885bremfree(struct buf *bp)
1886{
1887
1888	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1889	KASSERT((bp->b_flags & B_REMFREE) == 0,
1890	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1891	KASSERT(bp->b_qindex != QUEUE_NONE,
1892	    ("bremfree: buffer %p not on a queue.", bp));
1893	BUF_ASSERT_XLOCKED(bp);
1894
1895	bp->b_flags |= B_REMFREE;
1896}
1897
1898/*
1899 *	bremfreef:
1900 *
1901 *	Force an immediate removal from a free list.  Used only in nfs when
1902 *	it abuses the b_freelist pointer.
1903 */
1904void
1905bremfreef(struct buf *bp)
1906{
1907	struct bufqueue *bq;
1908
1909	bq = bufqueue_acquire(bp);
1910	bq_remove(bq, bp);
1911	BQ_UNLOCK(bq);
1912}
1913
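/*
 *	bq_init:
 *
 *	Initialize a buffer queue: its lock, tail queue, queue index and
 *	subqueue identifier.
 */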
1914static void
1915bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1916{
1917
1918	mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1919	TAILQ_INIT(&bq->bq_queue);
1920	bq->bq_len = 0;
1921	bq->bq_index = qindex;
1922	bq->bq_subqueue = subqueue;
1923}
1924
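/*
 *	bd_init:
 *
 *	Initialize a buffer domain: the per-cpu clean subqueues, the
 *	domain-wide clean queue, the dirty queue and the bufspace daemon
 *	run lock.
 */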
1925static void
1926bd_init(struct bufdomain *bd)
1927{
1928	int i;
1929
1930	/* Per-CPU clean buf queues, plus one global queue. */
1931	bd->bd_subq = mallocarray(mp_maxid + 2, sizeof(struct bufqueue),
1932	    M_BIOBUF, M_WAITOK | M_ZERO);
1933	bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
1934	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
1935	bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
1936	for (i = 0; i <= mp_maxid; i++)
1937		bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1938		    "bufq clean subqueue lock");
1939	mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1940}
1941
1942/*
1943 *	bq_remove:
1944 *
1945 *	Removes a buffer from the free list, must be called with the
1946 *	correct qlock held.
1947 */
1948static void
1949bq_remove(struct bufqueue *bq, struct buf *bp)
1950{
1951
1952	CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1953	    bp, bp->b_vp, bp->b_flags);
1954	KASSERT(bp->b_qindex != QUEUE_NONE,
1955	    ("bq_remove: buffer %p not on a queue.", bp));
1956	KASSERT(bufqueue(bp) == bq,
1957	    ("bq_remove: Remove buffer %p from wrong queue.", bp));
1958
1959	BQ_ASSERT_LOCKED(bq);
1960	if (bp->b_qindex != QUEUE_EMPTY) {
1961		BUF_ASSERT_XLOCKED(bp);
1962	}
1963	KASSERT(bq->bq_len >= 1,
1964	    ("queue %d underflow", bp->b_qindex));
1965	TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1966	bq->bq_len--;
1967	bp->b_qindex = QUEUE_NONE;
1968	bp->b_flags &= ~(B_REMFREE | B_REUSE);
1969}
1970
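/*
 *	bd_flush:
 *
 *	Move the bufs on a per-cpu clean subqueue to the domain-wide
 *	clean queue and wake up any threads waiting for buffers in the
 *	domain.
 */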
1971static void
1972bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1973{
1974	struct buf *bp;
1975
1976	BQ_ASSERT_LOCKED(bq);
1977	if (bq != bd->bd_cleanq) {
1978		BD_LOCK(bd);
1979		while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1980			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1981			TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1982			    b_freelist);
1983			bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
1984		}
1985		bd->bd_cleanq->bq_len += bq->bq_len;
1986		bq->bq_len = 0;
1987	}
1988	if (bd->bd_wanted) {
1989		bd->bd_wanted = 0;
1990		wakeup(&bd->bd_wanted);
1991	}
1992	if (bq != bd->bd_cleanq)
1993		BD_UNLOCK(bd);
1994}
1995
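/*
 *	bd_flushall:
 *
 *	Flush every non-empty per-cpu clean subqueue to the domain-wide
 *	clean queue.  Returns the number of subqueues flushed; domains
 *	that do not use per-cpu queues are left untouched.
 */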
1996static int
1997bd_flushall(struct bufdomain *bd)
1998{
1999	struct bufqueue *bq;
2000	int flushed;
2001	int i;
2002
2003	if (bd->bd_lim == 0)
2004		return (0);
2005	flushed = 0;
2006	for (i = 0; i <= mp_maxid; i++) {
2007		bq = &bd->bd_subq[i];
2008		if (bq->bq_len == 0)
2009			continue;
2010		BQ_LOCK(bq);
2011		bd_flush(bd, bq);
2012		BQ_UNLOCK(bq);
2013		flushed++;
2014	}
2015
2016	return (flushed);
2017}
2018
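/*
 *	bq_insert:
 *
 *	Add a buffer to a free queue.  Buffers marked B_AGE are placed
 *	at the head (of the domain-wide clean queue for clean buffers)
 *	so that they are reclaimed first.  If requested, the buf lock is
 *	dropped before any waiters are notified.
 */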
2019static void
2020bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
2021{
2022	struct bufdomain *bd;
2023
2024	if (bp->b_qindex != QUEUE_NONE)
2025		panic("bq_insert: free buffer %p onto another queue?", bp);
2026
2027	bd = bufdomain(bp);
2028	if (bp->b_flags & B_AGE) {
2029		/* Place this buf directly on the real queue. */
2030		if (bq->bq_index == QUEUE_CLEAN)
2031			bq = bd->bd_cleanq;
2032		BQ_LOCK(bq);
2033		TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
2034	} else {
2035		BQ_LOCK(bq);
2036		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
2037	}
2038	bp->b_flags &= ~(B_AGE | B_REUSE);
2039	bq->bq_len++;
2040	bp->b_qindex = bq->bq_index;
2041	bp->b_subqueue = bq->bq_subqueue;
2042
2043	/*
2044	 * Unlock before we notify so that we don't wakeup a waiter that
2045	 * fails a trylock on the buf and sleeps again.
2046	 */
2047	if (unlock)
2048		BUF_UNLOCK(bp);
2049
2050	if (bp->b_qindex == QUEUE_CLEAN) {
2051		/*
2052		 * Flush the per-cpu queue and notify any waiters.
2053		 */
2054		if (bd->bd_wanted || (bq != bd->bd_cleanq &&
2055		    bq->bq_len >= bd->bd_lim))
2056			bd_flush(bd, bq);
2057	}
2058	BQ_UNLOCK(bq);
2059}
2060
2061/*
2062 *	bufkva_free:
2063 *
2064 *	Free the kva allocation for a buffer.
2065 *
2066 */
2067static void
2068bufkva_free(struct buf *bp)
2069{
2070
2071#ifdef INVARIANTS
2072	if (bp->b_kvasize == 0) {
2073		KASSERT(bp->b_kvabase == unmapped_buf &&
2074		    bp->b_data == unmapped_buf,
2075		    ("Leaked KVA space on %p", bp));
2076	} else if (buf_mapped(bp))
2077		BUF_CHECK_MAPPED(bp);
2078	else
2079		BUF_CHECK_UNMAPPED(bp);
2080#endif
2081	if (bp->b_kvasize == 0)
2082		return;
2083
2084	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
2085	counter_u64_add(bufkvaspace, -bp->b_kvasize);
2086	counter_u64_add(buffreekvacnt, 1);
2087	bp->b_data = bp->b_kvabase = unmapped_buf;
2088	bp->b_kvasize = 0;
2089}
2090
2091/*
2092 *	bufkva_alloc:
2093 *
2094 *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
2095 */
2096static int
2097bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
2098{
2099	vm_offset_t addr;
2100	int error;
2101
2102	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
2103	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
2104	MPASS((bp->b_flags & B_MAXPHYS) == 0);
2105	KASSERT(maxsize <= maxbcachebuf,
2106	    ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
2107
2108	bufkva_free(bp);
2109
2110	addr = 0;
2111	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
2112	if (error != 0) {
2113		/*
2114		 * Buffer map is too fragmented.  Request the caller
2115		 * to defragment the map.
2116		 */
2117		return (error);
2118	}
2119	bp->b_kvabase = (caddr_t)addr;
2120	bp->b_kvasize = maxsize;
2121	counter_u64_add(bufkvaspace, bp->b_kvasize);
2122	if ((gbflags & GB_UNMAPPED) != 0) {
2123		bp->b_data = unmapped_buf;
2124		BUF_CHECK_UNMAPPED(bp);
2125	} else {
2126		bp->b_data = bp->b_kvabase;
2127		BUF_CHECK_MAPPED(bp);
2128	}
2129	return (0);
2130}
2131
2132/*
2133 *	bufkva_reclaim:
2134 *
2135 *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
2136 *	reclaim callback that fires when an allocation would otherwise fail.
2137 */
2138static void
2139bufkva_reclaim(vmem_t *vmem, int flags)
2140{
2141	bool done;
2142	int q;
2143	int i;
2144
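	/*
	 * Make up to five passes, recycling one kva-owning buffer from
	 * each domain per pass; stop early once some domain has nothing
	 * left to recycle.
	 */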
2145	done = false;
2146	for (i = 0; i < 5; i++) {
2147		for (q = 0; q < buf_domains; q++)
2148			if (buf_recycle(&bdomain[q], true) != 0)
2149				done = true;
2150		if (done)
2151			break;
2152	}
2153	return;
2154}
2155
2156/*
2157 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
2158 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
2159 * the buffer is valid and we do not have to do anything.
2160 */
2161static void
2162breada(struct vnode *vp, daddr_t *rablkno, int *rabsize, int cnt,
2163    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *))
2164{
2165	struct buf *rabp;
2166	struct thread *td;
2167	int i;
2168
2169	td = curthread;
2170
2171	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
2172		if (inmem(vp, *rablkno))
2173			continue;
2174		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
2175		if ((rabp->b_flags & B_CACHE) != 0) {
2176			brelse(rabp);
2177			continue;
2178		}
2179#ifdef RACCT
2180		if (racct_enable) {
2181			PROC_LOCK(curproc);
2182			racct_add_buf(curproc, rabp, 0);
2183			PROC_UNLOCK(curproc);
2184		}
2185#endif /* RACCT */
2186		td->td_ru.ru_inblock++;
2187		rabp->b_flags |= B_ASYNC;
2188		rabp->b_flags &= ~B_INVAL;
2189		if ((flags & GB_CKHASH) != 0) {
2190			rabp->b_flags |= B_CKHASH;
2191			rabp->b_ckhashcalc = ckhashfunc;
2192		}
2193		rabp->b_ioflags &= ~BIO_ERROR;
2194		rabp->b_iocmd = BIO_READ;
2195		if (rabp->b_rcred == NOCRED && cred != NOCRED)
2196			rabp->b_rcred = crhold(cred);
2197		vfs_busy_pages(rabp, 0);
2198		BUF_KERNPROC(rabp);
2199		rabp->b_iooffset = dbtob(rabp->b_blkno);
2200		bstrategy(rabp);
2201	}
2202}
2203
2204/*
2205 * Entry point for bread() and breadn() via #defines in sys/buf.h.
2206 *
2207 * Get a buffer with the specified data.  Look in the cache first.  We
2208 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
2209 * is set, the buffer is valid and we do not have to do anything, see
2210 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
2211 *
2212 * Always return a NULL buffer pointer (in bpp) when returning an error.
2213 *
2214 * The blkno parameter is the logical block being requested. Normally
2215 * the mapping of logical block number to disk block address is done
2216 * by calling VOP_BMAP(). However, if the mapping is already known, the
2217 * disk block address can be passed using the dblkno parameter. If the
2218 * disk block address is not known, then the same value should be passed
2219 * for blkno and dblkno.
2220 */
2221int
2222breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
2223    daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags,
2224    void (*ckhashfunc)(struct buf *), struct buf **bpp)
2225{
2226	struct buf *bp;
2227	struct thread *td;
2228	int error, readwait, rv;
2229
2230	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2231	td = curthread;
2232	/*
2233	 * Can only return NULL if GB_LOCK_NOWAIT or GB_SPARSE flags
2234	 * are specified.
2235	 */
2236	error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp);
2237	if (error != 0) {
2238		*bpp = NULL;
2239		return (error);
2240	}
2241	KASSERT(blkno == bp->b_lblkno,
2242	    ("getblkx returned buffer for blkno %jd instead of blkno %jd",
2243	    (intmax_t)bp->b_lblkno, (intmax_t)blkno));
2244	flags &= ~GB_NOSPARSE;
2245	*bpp = bp;
2246
2247	/*
2248	 * If not found in cache, do some I/O
2249	 */
2250	readwait = 0;
2251	if ((bp->b_flags & B_CACHE) == 0) {
2252#ifdef RACCT
2253		if (racct_enable) {
2254			PROC_LOCK(td->td_proc);
2255			racct_add_buf(td->td_proc, bp, 0);
2256			PROC_UNLOCK(td->td_proc);
2257		}
2258#endif /* RACCT */
2259		td->td_ru.ru_inblock++;
2260		bp->b_iocmd = BIO_READ;
2261		bp->b_flags &= ~B_INVAL;
2262		if ((flags & GB_CKHASH) != 0) {
2263			bp->b_flags |= B_CKHASH;
2264			bp->b_ckhashcalc = ckhashfunc;
2265		}
2266		if ((flags & GB_CVTENXIO) != 0)
2267			bp->b_xflags |= BX_CVTENXIO;
2268		bp->b_ioflags &= ~BIO_ERROR;
2269		if (bp->b_rcred == NOCRED && cred != NOCRED)
2270			bp->b_rcred = crhold(cred);
2271		vfs_busy_pages(bp, 0);
2272		bp->b_iooffset = dbtob(bp->b_blkno);
2273		bstrategy(bp);
2274		++readwait;
2275	}
2276
2277	/*
2278	 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2279	 */
2280	breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2281
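	/*
	 * If a synchronous read was issued above, wait for it to
	 * complete; on error, release the buffer and return NULL in
	 * *bpp.
	 */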
2282	rv = 0;
2283	if (readwait) {
2284		rv = bufwait(bp);
2285		if (rv != 0) {
2286			brelse(bp);
2287			*bpp = NULL;
2288		}
2289	}
2290	return (rv);
2291}
2292
2293/*
2294 * Write, release buffer on completion.  (Done by iodone
2295 * if async).  Do not bother writing anything if the buffer
2296 * is invalid.
2297 *
2298 * Note that we set B_CACHE here, indicating that buffer is
2299 * fully valid and thus cacheable.  This is true even of NFS
2300 * now so we set it generally.  This could be set either here
2301 * or in biodone() since the I/O is synchronous.  We put it
2302 * here.
2303 */
2304int
2305bufwrite(struct buf *bp)
2306{
2307	int oldflags;
2308	struct vnode *vp;
2309	long space;
2310	int vp_md;
2311
2312	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2313	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2314		bp->b_flags |= B_INVAL | B_RELBUF;
2315		bp->b_flags &= ~B_CACHE;
2316		brelse(bp);
2317		return (ENXIO);
2318	}
2319	if (bp->b_flags & B_INVAL) {
2320		brelse(bp);
2321		return (0);
2322	}
2323
2324	if (bp->b_flags & B_BARRIER)
2325		atomic_add_long(&barrierwrites, 1);
2326
2327	oldflags = bp->b_flags;
2328
2329	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2330	    ("FFS background buffer should not get here %p", bp));
2331
2332	vp = bp->b_vp;
2333	if (vp)
2334		vp_md = vp->v_vflag & VV_MD;
2335	else
2336		vp_md = 0;
2337
2338	/*
2339	 * Mark the buffer clean.  Increment the bufobj write count
2340	 * before bundirty() call, to prevent other thread from seeing
2341	 * empty dirty list and zero counter for writes in progress,
2342	 * falsely indicating that the bufobj is clean.
2343	 */
2344	bufobj_wref(bp->b_bufobj);
2345	bundirty(bp);
2346
2347	bp->b_flags &= ~B_DONE;
2348	bp->b_ioflags &= ~BIO_ERROR;
2349	bp->b_flags |= B_CACHE;
2350	bp->b_iocmd = BIO_WRITE;
2351
2352	vfs_busy_pages(bp, 1);
2353
2354	/*
2355	 * Normal bwrites pipeline writes
2356	 */
2357	bp->b_runningbufspace = bp->b_bufsize;
2358	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
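	/*
	 * atomic_fetchadd_long() returns the old total, so space holds
	 * runningbufspace as it was before this buffer was added; it is
	 * compared against hirunningspace below to decide whether an
	 * async write must throttle.
	 */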
2359
2360#ifdef RACCT
2361	if (racct_enable) {
2362		PROC_LOCK(curproc);
2363		racct_add_buf(curproc, bp, 1);
2364		PROC_UNLOCK(curproc);
2365	}
2366#endif /* RACCT */
2367	curthread->td_ru.ru_oublock++;
2368	if (oldflags & B_ASYNC)
2369		BUF_KERNPROC(bp);
2370	bp->b_iooffset = dbtob(bp->b_blkno);
2371	buf_track(bp, __func__);
2372	bstrategy(bp);
2373
2374	if ((oldflags & B_ASYNC) == 0) {
2375		int rtval = bufwait(bp);
2376		brelse(bp);
2377		return (rtval);
2378	} else if (space > hirunningspace) {
2379		/*
2380		 * don't allow the async write to saturate the I/O
2381		 * system.  We will not deadlock here because
2382		 * we are blocking waiting for I/O that is already in-progress
2383		 * to complete. We do not block here if it is the update
2384		 * or syncer daemon trying to clean up as that can lead
2385		 * to deadlock.
2386		 */
2387		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2388			waitrunningbufspace();
2389	}
2390
2391	return (0);
2392}
2393
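/*
 *	bufbdflush:
 *
 *	Keep the number of dirty buffers on a bufobj under control: once
 *	the domain's dirty threshold is exceeded, flush one of the
 *	bufobj's dirty buffers, or fsync the vnode when far past the
 *	threshold.
 */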
2394void
2395bufbdflush(struct bufobj *bo, struct buf *bp)
2396{
2397	struct buf *nbp;
2398	struct bufdomain *bd;
2399
2400	bd = &bdomain[bo->bo_domain];
2401	if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh + 10) {
2402		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2403		altbufferflushes++;
2404	} else if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh) {
2405		BO_LOCK(bo);
2406		/*
2407		 * Try to find a buffer to flush.
2408		 */
2409		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2410			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2411			    BUF_LOCK(nbp,
2412				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2413				continue;
2414			if (bp == nbp)
2415				panic("bdwrite: found ourselves");
2416			BO_UNLOCK(bo);
2417			/* Don't countdeps with the bo lock held. */
2418			if (buf_countdeps(nbp, 0)) {
2419				BO_LOCK(bo);
2420				BUF_UNLOCK(nbp);
2421				continue;
2422			}
2423			if (nbp->b_flags & B_CLUSTEROK) {
2424				vfs_bio_awrite(nbp);
2425			} else {
2426				bremfree(nbp);
2427				bawrite(nbp);
2428			}
2429			dirtybufferflushes++;
2430			break;
2431		}
2432		if (nbp == NULL)
2433			BO_UNLOCK(bo);
2434	}
2435}
2436
2437/*
2438 * Delayed write. (Buffer is marked dirty).  Do not bother writing
2439 * anything if the buffer is marked invalid.
2440 *
2441 * Note that since the buffer must be completely valid, we can safely
2442 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
2443 * biodone() in order to prevent getblk from writing the buffer
2444 * out synchronously.
2445 */
2446void
2447bdwrite(struct buf *bp)
2448{
2449	struct thread *td = curthread;
2450	struct vnode *vp;
2451	struct bufobj *bo;
2452
2453	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2454	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2455	KASSERT((bp->b_flags & B_BARRIER) == 0,
2456	    ("Barrier request in delayed write %p", bp));
2457
2458	if (bp->b_flags & B_INVAL) {
2459		brelse(bp);
2460		return;
2461	}
2462
2463	/*
2464	 * If we have too many dirty buffers, don't create any more.
2465	 * If we are wildly over our limit, then force a complete
2466	 * cleanup. Otherwise, just keep the situation from getting
2467	 * out of control. Note that we have to avoid a recursive
2468	 * disaster and not try to clean up after our own cleanup!
2469	 */
2470	vp = bp->b_vp;
2471	bo = bp->b_bufobj;
2472	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2473		td->td_pflags |= TDP_INBDFLUSH;
2474		BO_BDFLUSH(bo, bp);
2475		td->td_pflags &= ~TDP_INBDFLUSH;
2476	} else
2477		recursiveflushes++;
2478
2479	bdirty(bp);
2480	/*
2481	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2482	 * true even of NFS now.
2483	 */
2484	bp->b_flags |= B_CACHE;
2485
2486	/*
2487	 * This bmap keeps the system from needing to do the bmap later,
2488	 * perhaps when the system is attempting to do a sync.  Since it
2489	 * is likely that the indirect block -- or whatever other data structure
2490	 * the filesystem needs is still in memory now, it is a good
2491	 * thing to do this.  Note also, that if the pageout daemon is
2492	 * requesting a sync -- there might not be enough memory to do
2493	 * the bmap then...  So, this is important to do.
2494	 */
2495	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2496		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2497	}
2498
2499	buf_track(bp, __func__);
2500
2501	/*
2502	 * Set the *dirty* buffer range based upon the VM system dirty
2503	 * pages.
2504	 *
2505	 * Mark the buffer pages as clean.  We need to do this here to
2506	 * satisfy the vnode_pager and the pageout daemon, so that it
2507	 * thinks that the pages have been "cleaned".  Note that since
2508	 * the pages are in a delayed write buffer -- the VFS layer
2509	 * "will" see that the pages get written out on the next sync,
2510	 * or perhaps the cluster will be completed.
2511	 */
2512	vfs_clean_pages_dirty_buf(bp);
2513	bqrelse(bp);
2514
2515	/*
2516	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2517	 * due to the softdep code.
2518	 */
2519}
2520
2521/*
2522 *	bdirty:
2523 *
2524 *	Turn buffer into delayed write request.  We must clear BIO_READ and
2525 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2526 *	itself to properly update it in the dirty/clean lists.  We mark it
2527 *	B_DONE to ensure that any asynchronization of the buffer properly
2528 *	clears B_DONE ( else a panic will occur later ).
2529 *
2530 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2531 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2532 *	should only be called if the buffer is known-good.
2533 *
2534 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2535 *	count.
2536 *
2537 *	The buffer must be on QUEUE_NONE.
2538 */
2539void
2540bdirty(struct buf *bp)
2541{
2542
2543	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2544	    bp, bp->b_vp, bp->b_flags);
2545	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2546	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2547	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2548	bp->b_flags &= ~(B_RELBUF);
2549	bp->b_iocmd = BIO_WRITE;
2550
2551	if ((bp->b_flags & B_DELWRI) == 0) {
2552		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2553		reassignbuf(bp);
2554		bdirtyadd(bp);
2555	}
2556}
2557
2558/*
2559 *	bundirty:
2560 *
2561 *	Clear B_DELWRI for buffer.
2562 *
2563 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2564 *	count.
2565 *
2566 *	The buffer must be on QUEUE_NONE.
2567 */
2568
2569void
2570bundirty(struct buf *bp)
2571{
2572
2573	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2574	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2575	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2576	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2577
2578	if (bp->b_flags & B_DELWRI) {
2579		bp->b_flags &= ~B_DELWRI;
2580		reassignbuf(bp);
2581		bdirtysub(bp);
2582	}
2583	/*
2584	 * Since it is now being written, we can clear its deferred write flag.
2585	 */
2586	bp->b_flags &= ~B_DEFERRED;
2587}
2588
2589/*
2590 *	bawrite:
2591 *
2592 *	Asynchronous write.  Start output on a buffer, but do not wait for
2593 *	it to complete.  The buffer is released when the output completes.
2594 *
2595 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2596 *	B_INVAL buffers.  Not us.
2597 */
2598void
2599bawrite(struct buf *bp)
2600{
2601
2602	bp->b_flags |= B_ASYNC;
2603	(void) bwrite(bp);
2604}
2605
2606/*
2607 *	babarrierwrite:
2608 *
2609 *	Asynchronous barrier write.  Start output on a buffer, but do not
2610 *	wait for it to complete.  Place a write barrier after this write so
2611 *	that this buffer and all buffers written before it are committed to
2612 *	the disk before any buffers written after this write are committed
2613 *	to the disk.  The buffer is released when the output completes.
2614 */
2615void
2616babarrierwrite(struct buf *bp)
2617{
2618
2619	bp->b_flags |= B_ASYNC | B_BARRIER;
2620	(void) bwrite(bp);
2621}
2622
2623/*
2624 *	bbarrierwrite:
2625 *
2626 *	Synchronous barrier write.  Start output on a buffer and wait for
2627 *	it to complete.  Place a write barrier after this write so that
2628 *	this buffer and all buffers written before it are committed to
2629 *	the disk before any buffers written after this write are committed
2630 *	to the disk.  The buffer is released when the output completes.
2631 */
2632int
2633bbarrierwrite(struct buf *bp)
2634{
2635
2636	bp->b_flags |= B_BARRIER;
2637	return (bwrite(bp));
2638}
2639
2640/*
2641 *	bwillwrite:
2642 *
2643 *	Called prior to the locking of any vnodes when we are expecting to
2644 *	write.  We do not want to starve the buffer cache with too many
2645 *	dirty buffers so we block here.  By blocking prior to the locking
2646 *	of any vnodes we attempt to avoid the situation where a locked vnode
2647 *	prevents the various system daemons from flushing related buffers.
2648 */
2649void
2650bwillwrite(void)
2651{
2652
2653	if (buf_dirty_count_severe()) {
2654		mtx_lock(&bdirtylock);
2655		while (buf_dirty_count_severe()) {
2656			bdirtywait = 1;
2657			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2658			    "flswai", 0);
2659		}
2660		mtx_unlock(&bdirtylock);
2661	}
2662}
2663
2664/*
2665 * Return true if we have too many dirty buffers.
2666 */
2667int
2668buf_dirty_count_severe(void)
2669{
2670
2671	return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
2672}
2673
2674/*
2675 *	brelse:
2676 *
2677 *	Release a busy buffer and, if requested, free its resources.  The
2678 *	buffer will be stashed in the appropriate bufqueue[] allowing it
2679 *	to be accessed later as a cache entity or reused for other purposes.
2680 */
2681void
2682brelse(struct buf *bp)
2683{
2684	struct mount *v_mnt;
2685	int qindex;
2686
2687	/*
2688	 * Many functions erroneously call brelse with a NULL bp under rare
2689	 * error conditions. Simply return when called with a NULL bp.
2690	 */
2691	if (bp == NULL)
2692		return;
2693	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2694	    bp, bp->b_vp, bp->b_flags);
2695	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2696	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2697	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2698	    ("brelse: non-VMIO buffer marked NOREUSE"));
2699
2700	if (BUF_LOCKRECURSED(bp)) {
2701		/*
2702		 * Do not process, in particular, do not handle the
2703		 * B_INVAL/B_RELBUF and do not release to free list.
2704		 */
2705		BUF_UNLOCK(bp);
2706		return;
2707	}
2708
2709	if (bp->b_flags & B_MANAGED) {
2710		bqrelse(bp);
2711		return;
2712	}
2713
2714	if (LIST_EMPTY(&bp->b_dep)) {
2715		bp->b_flags &= ~B_IOSTARTED;
2716	} else {
2717		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2718		    ("brelse: SU io not finished bp %p", bp));
2719	}
2720
2721	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2722		BO_LOCK(bp->b_bufobj);
2723		bp->b_vflags &= ~BV_BKGRDERR;
2724		BO_UNLOCK(bp->b_bufobj);
2725		bdirty(bp);
2726	}
2727
2728	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2729	    (bp->b_flags & B_INVALONERR)) {
2730		/*
2731		 * Forced invalidation of dirty buffer contents, to be used
2732		 * after a failed write in the rare case that the loss of the
2733		 * contents is acceptable.  The buffer is invalidated and
2734		 * freed.
2735		 */
2736		bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
2737		bp->b_flags &= ~(B_ASYNC | B_CACHE);
2738	}
2739
2740	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2741	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2742	    !(bp->b_flags & B_INVAL)) {
2743		/*
2744		 * Failed write, redirty.  All errors except ENXIO (which
2745		 * means the device is gone) are treated as being
2746		 * transient.
2747		 *
2748		 * XXX Treating EIO as transient is not correct; the
2749		 * contract with the local storage device drivers is that
2750		 * they will only return EIO once the I/O is no longer
2751		 * retriable.  Network I/O also respects this through the
2752		 * guarantees of TCP and/or the internal retries of NFS.
2753		 * ENOMEM might be transient, but we also have no way of
2754		 * knowing when its ok to retry/reschedule.  In general,
2755		 * this entire case should be made obsolete through better
2756		 * error handling/recovery and resource scheduling.
2757		 *
2758		 * Do this also for buffers that failed with ENXIO, but have
2759		 * non-empty dependencies - the soft updates code might need
2760		 * to access the buffer to untangle them.
2761		 *
2762		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2763		 */
2764		bp->b_ioflags &= ~BIO_ERROR;
2765		bdirty(bp);
2766	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2767	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2768		/*
2769		 * Either a failed read I/O, or we were asked to free or not
2770		 * cache the buffer, or we failed to write to a device that's
2771		 * no longer present.
2772		 */
2773		bp->b_flags |= B_INVAL;
2774		if (!LIST_EMPTY(&bp->b_dep))
2775			buf_deallocate(bp);
2776		if (bp->b_flags & B_DELWRI)
2777			bdirtysub(bp);
2778		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2779		if ((bp->b_flags & B_VMIO) == 0) {
2780			allocbuf(bp, 0);
2781			if (bp->b_vp)
2782				brelvp(bp);
2783		}
2784	}
2785
2786	/*
2787	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2788	 * is called with B_DELWRI set, the underlying pages may wind up
2789	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2790	 * because pages associated with a B_DELWRI bp are marked clean.
2791	 *
2792	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2793	 * if B_DELWRI is set.
2794	 */
2795	if (bp->b_flags & B_DELWRI)
2796		bp->b_flags &= ~B_RELBUF;
2797
2798	/*
2799	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
2800	 * constituted, not even NFS buffers now.  Two flags affect this.  If
2801	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2802	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2803	 *
2804	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2805	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2806	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2807	 *
2808	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2809	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2810	 * the commit state and we cannot afford to lose the buffer. If the
2811	 * buffer has a background write in progress, we need to keep it
2812	 * around to prevent it from being reconstituted and starting a second
2813	 * background write.
2814	 */
2815
2816	v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
2817
2818	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2819	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2820	    (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
2821	    vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) {
2822		vfs_vmio_invalidate(bp);
2823		allocbuf(bp, 0);
2824	}
2825
2826	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2827	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2828		allocbuf(bp, 0);
2829		bp->b_flags &= ~B_NOREUSE;
2830		if (bp->b_vp != NULL)
2831			brelvp(bp);
2832	}
2833
2834	/*
2835	 * If the buffer has junk contents signal it and eventually
2836	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2837	 * doesn't find it.
2838	 */
2839	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2840	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2841		bp->b_flags |= B_INVAL;
2842	if (bp->b_flags & B_INVAL) {
2843		if (bp->b_flags & B_DELWRI)
2844			bundirty(bp);
2845		if (bp->b_vp)
2846			brelvp(bp);
2847	}
2848
2849	buf_track(bp, __func__);
2850
2851	/* buffers with no memory */
2852	if (bp->b_bufsize == 0) {
2853		buf_free(bp);
2854		return;
2855	}
2856	/* buffers with junk contents */
2857	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2858	    (bp->b_ioflags & BIO_ERROR)) {
2859		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2860		if (bp->b_vflags & BV_BKGRDINPROG)
2861			panic("losing buffer 2");
2862		qindex = QUEUE_CLEAN;
2863		bp->b_flags |= B_AGE;
2864	/* remaining buffers */
2865	} else if (bp->b_flags & B_DELWRI)
2866		qindex = QUEUE_DIRTY;
2867	else
2868		qindex = QUEUE_CLEAN;
2869
2870	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2871		panic("brelse: not dirty");
2872
2873	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2874	bp->b_xflags &= ~(BX_CVTENXIO);
2875	/* binsfree unlocks bp. */
2876	binsfree(bp, qindex);
2877}
2878
2879/*
2880 * Release a buffer back to the appropriate queue but do not try to free
2881 * it.  The buffer is expected to be used again soon.
2882 *
2883 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2884 * biodone() to requeue an async I/O on completion.  It is also used when
2885 * known good buffers need to be requeued but we think we may need the data
2886 * again soon.
2887 *
2888 * XXX we should be able to leave the B_RELBUF hint set on completion.
2889 */
2890void
2891bqrelse(struct buf *bp)
2892{
2893	int qindex;
2894
2895	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2896	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2897	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2898
2899	qindex = QUEUE_NONE;
2900	if (BUF_LOCKRECURSED(bp)) {
2901		/* do not release to free list */
2902		BUF_UNLOCK(bp);
2903		return;
2904	}
2905	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2906	bp->b_xflags &= ~(BX_CVTENXIO);
2907
2908	if (LIST_EMPTY(&bp->b_dep)) {
2909		bp->b_flags &= ~B_IOSTARTED;
2910	} else {
2911		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2912		    ("bqrelse: SU io not finished bp %p", bp));
2913	}
2914
2915	if (bp->b_flags & B_MANAGED) {
2916		if (bp->b_flags & B_REMFREE)
2917			bremfreef(bp);
2918		goto out;
2919	}
2920
2921	/* buffers with stale but valid contents */
2922	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2923	    BV_BKGRDERR)) == BV_BKGRDERR) {
2924		BO_LOCK(bp->b_bufobj);
2925		bp->b_vflags &= ~BV_BKGRDERR;
2926		BO_UNLOCK(bp->b_bufobj);
2927		qindex = QUEUE_DIRTY;
2928	} else {
2929		if ((bp->b_flags & B_DELWRI) == 0 &&
2930		    (bp->b_xflags & BX_VNDIRTY))
2931			panic("bqrelse: not dirty");
2932		if ((bp->b_flags & B_NOREUSE) != 0) {
2933			brelse(bp);
2934			return;
2935		}
2936		qindex = QUEUE_CLEAN;
2937	}
2938	buf_track(bp, __func__);
2939	/* binsfree unlocks bp. */
2940	binsfree(bp, qindex);
2941	return;
2942
2943out:
2944	buf_track(bp, __func__);
2945	/* unlock */
2946	BUF_UNLOCK(bp);
2947}
2948
2949/*
2950 * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2951 * restore bogus pages.
2952 */
2953static void
2954vfs_vmio_iodone(struct buf *bp)
2955{
2956	vm_ooffset_t foff;
2957	vm_page_t m;
2958	vm_object_t obj;
2959	struct vnode *vp __unused;
2960	int i, iosize, resid;
2961	bool bogus;
2962
2963	obj = bp->b_bufobj->bo_object;
2964	KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
2965	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2966	    blockcount_read(&obj->paging_in_progress), bp->b_npages));
2967
2968	vp = bp->b_vp;
2969	VNPASS(vp->v_holdcnt > 0, vp);
2970	VNPASS(vp->v_object != NULL, vp);
2971
2972	foff = bp->b_offset;
2973	KASSERT(bp->b_offset != NOOFFSET,
2974	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2975
2976	bogus = false;
2977	iosize = bp->b_bcount - bp->b_resid;
2978	for (i = 0; i < bp->b_npages; i++) {
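		/*
		 * resid is the number of bytes of this I/O that fall
		 * within the current page.
		 */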
2979		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2980		if (resid > iosize)
2981			resid = iosize;
2982
2983		/*
2984		 * cleanup bogus pages, restoring the originals
2985		 */
2986		m = bp->b_pages[i];
2987		if (m == bogus_page) {
2988			bogus = true;
2989			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
2990			if (m == NULL)
2991				panic("biodone: page disappeared!");
2992			bp->b_pages[i] = m;
2993		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2994			/*
2995			 * In the write case, the valid and clean bits are
2996			 * already changed correctly ( see bdwrite() ), so we
2997			 * only need to do this here in the read case.
2998			 */
2999			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
3000			    resid)) == 0, ("vfs_vmio_iodone: page %p "
3001			    "has unexpected dirty bits", m));
3002			vfs_page_set_valid(bp, foff, m);
3003		}
3004		KASSERT(OFF_TO_IDX(foff) == m->pindex,
3005		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
3006		    (intmax_t)foff, (uintmax_t)m->pindex));
3007
3008		vm_page_sunbusy(m);
3009		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3010		iosize -= resid;
3011	}
3012	vm_object_pip_wakeupn(obj, bp->b_npages);
3013	if (bogus && buf_mapped(bp)) {
3014		BUF_CHECK_MAPPED(bp);
3015		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3016		    bp->b_pages, bp->b_npages);
3017	}
3018}
3019
3020/*
3021 * Perform page invalidation when a buffer is released.  The fully invalid
3022 * pages will be reclaimed later in vfs_vmio_truncate().
3023 */
3024static void
3025vfs_vmio_invalidate(struct buf *bp)
3026{
3027	vm_object_t obj;
3028	vm_page_t m;
3029	int flags, i, resid, poffset, presid;
3030
3031	if (buf_mapped(bp)) {
3032		BUF_CHECK_MAPPED(bp);
3033		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
3034	} else
3035		BUF_CHECK_UNMAPPED(bp);
3036	/*
3037	 * Get the base offset and length of the buffer.  Note that
3038	 * in the VMIO case if the buffer block size is not
3039	 * page-aligned then the b_data pointer may not be page-aligned.
3040	 * But our b_pages[] array *IS* page aligned.
3041	 *
3042	 * block sizes less than DEV_BSIZE (usually 512) are not
3043	 * supported due to the page granularity bits (m->valid,
3044	 * m->dirty, etc...).
3045	 *
3046	 * See man buf(9) for more information
3047	 */
3048	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3049	obj = bp->b_bufobj->bo_object;
3050	resid = bp->b_bufsize;
3051	poffset = bp->b_offset & PAGE_MASK;
3052	VM_OBJECT_WLOCK(obj);
3053	for (i = 0; i < bp->b_npages; i++) {
3054		m = bp->b_pages[i];
3055		if (m == bogus_page)
3056			panic("vfs_vmio_invalidate: Unexpected bogus page.");
3057		bp->b_pages[i] = NULL;
3058
3059		presid = resid > (PAGE_SIZE - poffset) ?
3060		    (PAGE_SIZE - poffset) : resid;
3061		KASSERT(presid >= 0, ("brelse: extra page"));
3062		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
3063		if (pmap_page_wired_mappings(m) == 0)
3064			vm_page_set_invalid(m, poffset, presid);
3065		vm_page_sunbusy(m);
3066		vm_page_release_locked(m, flags);
3067		resid -= presid;
3068		poffset = 0;
3069	}
3070	VM_OBJECT_WUNLOCK(obj);
3071	bp->b_npages = 0;
3072}
3073
3074/*
3075 * Page-granular truncation of an existing VMIO buffer.
3076 */
3077static void
3078vfs_vmio_truncate(struct buf *bp, int desiredpages)
3079{
3080	vm_object_t obj;
3081	vm_page_t m;
3082	int flags, i;
3083
3084	if (bp->b_npages == desiredpages)
3085		return;
3086
3087	if (buf_mapped(bp)) {
3088		BUF_CHECK_MAPPED(bp);
3089		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
3090		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
3091	} else
3092		BUF_CHECK_UNMAPPED(bp);
3093
3094	/*
3095	 * The object lock is needed only if we will attempt to free pages.
3096	 */
3097	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3098	if ((bp->b_flags & B_DIRECT) != 0) {
3099		flags |= VPR_TRYFREE;
3100		obj = bp->b_bufobj->bo_object;
3101		VM_OBJECT_WLOCK(obj);
3102	} else {
3103		obj = NULL;
3104	}
3105	for (i = desiredpages; i < bp->b_npages; i++) {
3106		m = bp->b_pages[i];
3107		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
3108		bp->b_pages[i] = NULL;
3109		if (obj != NULL)
3110			vm_page_release_locked(m, flags);
3111		else
3112			vm_page_release(m, flags);
3113	}
3114	if (obj != NULL)
3115		VM_OBJECT_WUNLOCK(obj);
3116	bp->b_npages = desiredpages;
3117}
3118
3119/*
3120 * Byte granular extension of VMIO buffers.
3121 */
3122static void
3123vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
3124{
3125	/*
3126	 * We are growing the buffer, possibly in a
3127	 * byte-granular fashion.
3128	 */
3129	vm_object_t obj;
3130	vm_offset_t toff;
3131	vm_offset_t tinc;
3132	vm_page_t m;
3133
3134	/*
3135	 * Step 1, bring in the VM pages from the object, allocating
3136	 * them if necessary.  We must clear B_CACHE if these pages
3137	 * are not valid for the range covered by the buffer.
3138	 */
3139	obj = bp->b_bufobj->bo_object;
3140	if (bp->b_npages < desiredpages) {
3141		KASSERT(desiredpages <= atop(maxbcachebuf),
3142		    ("vfs_vmio_extend past maxbcachebuf %p %d %u",
3143		    bp, desiredpages, maxbcachebuf));
3144
3145		/*
3146		 * We must allocate system pages since blocking
3147		 * here could interfere with paging I/O, no
3148		 * matter which process we are.
3149		 *
3150		 * Only exclusive busy can be tested here.
3151		 * Blocking on shared busy might lead to
3152		 * deadlocks once allocbuf() is called after
3153		 * pages are vfs_busy_pages().
3154		 */
3155		(void)vm_page_grab_pages_unlocked(obj,
3156		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
3157		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
3158		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
3159		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
3160		bp->b_npages = desiredpages;
3161	}
3162
3163	/*
3164	 * Step 2.  We've loaded the pages into the buffer,
3165	 * we have to figure out if we can still have B_CACHE
3166	 * set.  Note that B_CACHE is set according to the
3167	 * byte-granular range ( bcount and size ), not the
3168	 * aligned range ( newbsize ).
3169	 *
3170	 * The VM test is against m->valid, which is DEV_BSIZE
3171	 * aligned.  Needless to say, the validity of the data
3172	 * needs to also be DEV_BSIZE aligned.  Note that this
3173	 * fails with NFS if the server or some other client
3174	 * extends the file's EOF.  If our buffer is resized,
3175	 * B_CACHE may remain set! XXX
3176	 */
3177	toff = bp->b_bcount;
3178	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3179	while ((bp->b_flags & B_CACHE) && toff < size) {
3180		vm_pindex_t pi;
3181
3182		if (tinc > (size - toff))
3183			tinc = size - toff;
3184		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
3185		m = bp->b_pages[pi];
3186		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
3187		toff += tinc;
3188		tinc = PAGE_SIZE;
3189	}
3190
3191	/*
3192	 * Step 3, fixup the KVA pmap.
3193	 */
3194	if (buf_mapped(bp))
3195		bpmap_qenter(bp);
3196	else
3197		BUF_CHECK_UNMAPPED(bp);
3198}
3199
3200/*
3201 * Check to see if a block at a particular lbn is available for a clustered
3202 * write.
3203 */
3204static int
3205vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
3206{
3207	struct buf *bpa;
3208	int match;
3209
3210	match = 0;
3211
3212	/* If the buf isn't in core skip it */
3213	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
3214		return (0);
3215
3216	/* If the buf is busy we don't want to wait for it */
3217	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
3218		return (0);
3219
3220	/* Only cluster with valid clusterable delayed write buffers */
3221	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
3222	    (B_DELWRI | B_CLUSTEROK))
3223		goto done;
3224
3225	if (bpa->b_bufsize != size)
3226		goto done;
3227
3228	/*
3229	 * Check to see if it is in the expected place on disk and that the
3230	 * block has been mapped.
3231	 */
3232	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3233		match = 1;
3234done:
3235	BUF_UNLOCK(bpa);
3236	return (match);
3237}
3238
3239/*
3240 *	vfs_bio_awrite:
3241 *
3242 *	Implement clustered async writes for clearing out B_DELWRI buffers.
3243 *	This is much better than the old way of writing only one buffer at
3244 *	a time.  Note that we may not be presented with the buffers in the
3245 *	correct order, so we search for the cluster in both directions.
3246 */
3247int
3248vfs_bio_awrite(struct buf *bp)
3249{
3250	struct bufobj *bo;
3251	int i;
3252	int j;
3253	daddr_t lblkno = bp->b_lblkno;
3254	struct vnode *vp = bp->b_vp;
3255	int ncl;
3256	int nwritten;
3257	int size;
3258	int maxcl;
3259	int gbflags;
3260
3261	bo = &vp->v_bufobj;
3262	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3263	/*
3264	 * right now we support clustered writing only to regular files.  If
3265	 * we find a clusterable block we could be in the middle of a cluster
3266	 * rather than at the beginning.
3267	 */
3268	if ((vp->v_type == VREG) &&
3269	    (vp->v_mount != NULL) && /* Only on nodes that have the size info */
3270	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3271		size = vp->v_mount->mnt_stat.f_iosize;
3272		maxcl = maxphys / size;
3273
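		/*
		 * Scan forward, then backward, for adjacent clusterable
		 * delayed-write buffers.
		 */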
3274		BO_RLOCK(bo);
3275		for (i = 1; i < maxcl; i++)
3276			if (vfs_bio_clcheck(vp, size, lblkno + i,
3277			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3278				break;
3279
3280		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3281			if (vfs_bio_clcheck(vp, size, lblkno - j,
3282			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3283				break;
3284		BO_RUNLOCK(bo);
3285		--j;
3286		ncl = i + j;
3287		/*
3288		 * this is a possible cluster write
3289		 */
3290		if (ncl != 1) {
3291			BUF_UNLOCK(bp);
3292			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3293			    gbflags);
3294			return (nwritten);
3295		}
3296	}
3297	bremfree(bp);
3298	bp->b_flags |= B_ASYNC;
3299	/*
3300	 * default (old) behavior, writing out only one block
3301	 *
3302	 * XXX returns b_bufsize instead of b_bcount for nwritten?
3303	 */
3304	nwritten = bp->b_bufsize;
3305	(void) bwrite(bp);
3306
3307	return (nwritten);
3308}
3309
3310/*
3311 *	getnewbuf_kva:
3312 *
3313 *	Allocate KVA for an empty buf header according to gbflags.
3314 */
3315static int
3316getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3317{
3318
3319	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3320		/*
3321		 * In order to keep fragmentation sane we only allocate kva
3322		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
3323		 */
3324		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3325
3326		if (maxsize != bp->b_kvasize &&
3327		    bufkva_alloc(bp, maxsize, gbflags))
3328			return (ENOSPC);
3329	}
3330	return (0);
3331}
3332
3333/*
3334 *	getnewbuf:
3335 *
3336 *	Find and initialize a new buffer header, freeing up existing buffers
3337 *	in the bufqueues as necessary.  The new buffer is returned locked.
3338 *
3339 *	We block if:
3340 *		We have insufficient buffer headers
3341 *		We have insufficient buffer space
3342 *		buffer_arena is too fragmented ( space reservation fails )
3343 *		If we have to flush dirty buffers ( but we try to avoid this )
3344 *
3345 *	The caller is responsible for releasing the reserved bufspace after
3346 *	allocbuf() is called.
3347 */
3348static struct buf *
3349getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3350{
3351	struct bufdomain *bd;
3352	struct buf *bp;
3353	bool metadata, reserved;
3354
3355	bp = NULL;
3356	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3357	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3358	if (!unmapped_buf_allowed)
3359		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3360
3361	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3362	    vp->v_type == VCHR)
3363		metadata = true;
3364	else
3365		metadata = false;
3366	if (vp == NULL)
3367		bd = &bdomain[0];
3368	else
3369		bd = &bdomain[vp->v_bufobj.bo_domain];
3370
3371	counter_u64_add(getnewbufcalls, 1);
3372	reserved = false;
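	/*
	 * Reserve bufspace and allocate a buf header; if either step
	 * fails, recycle a clean buffer and retry.
	 */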
3373	do {
3374		if (reserved == false &&
3375		    bufspace_reserve(bd, maxsize, metadata) != 0) {
3376			counter_u64_add(getnewbufrestarts, 1);
3377			continue;
3378		}
3379		reserved = true;
3380		if ((bp = buf_alloc(bd)) == NULL) {
3381			counter_u64_add(getnewbufrestarts, 1);
3382			continue;
3383		}
3384		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3385			return (bp);
3386		break;
3387	} while (buf_recycle(bd, false) == 0);
3388
3389	if (reserved)
3390		bufspace_release(bd, maxsize);
3391	if (bp != NULL) {
3392		bp->b_flags |= B_INVAL;
3393		brelse(bp);
3394	}
3395	bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3396
3397	return (NULL);
3398}
3399
3400/*
3401 *	buf_daemon:
3402 *
3403 *	buffer flushing daemon.  Buffers are normally flushed by the
3404 *	update daemon but if it cannot keep up this process starts to
3405 *	take the load in an attempt to prevent getnewbuf() from blocking.
3406 */
3407static struct kproc_desc buf_kp = {
3408	"bufdaemon",
3409	buf_daemon,
3410	&bufdaemonproc
3411};
3412SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3413
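/*
 *	buf_flush:
 *
 *	Flush up to target dirty buffers from the given domain,
 *	resorting to buffers with rollback dependencies only when
 *	nothing else could be flushed.
 */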
3414static int
3415buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
3416{
3417	int flushed;
3418
3419	flushed = flushbufqueues(vp, bd, target, 0);
3420	if (flushed == 0) {
3421		/*
3422		 * Could not find any buffers without rollback
3423		 * dependencies, so just write the first one
3424		 * in the hopes of eventually making progress.
3425		 */
3426		if (vp != NULL && target > 2)
3427			target /= 2;
3428		flushbufqueues(vp, bd, target, 1);
3429	}
3430	return (flushed);
3431}
3432
3433static void
3434buf_daemon_shutdown(void *arg __unused, int howto __unused)
3435{
3436	int error;
3437
3438	if (KERNEL_PANICKED())
3439		return;
3440
3441	mtx_lock(&bdlock);
3442	bd_shutdown = true;
3443	wakeup(&bd_request);
3444	error = msleep(&bd_shutdown, &bdlock, 0, "buf_daemon_shutdown",
3445	    60 * hz);
3446	mtx_unlock(&bdlock);
3447	if (error != 0)
3448		printf("bufdaemon wait error: %d\n", error);
3449}
3450
3451static void
3452buf_daemon(void)
3453{
3454	struct bufdomain *bd;
3455	int speedupreq;
3456	int lodirty;
3457	int i;
3458
3459	/*
3460	 * This process needs to be suspended prior to shutdown sync.
3461	 */
3462	EVENTHANDLER_REGISTER(shutdown_pre_sync, buf_daemon_shutdown, NULL,
3463	    SHUTDOWN_PRI_LAST + 100);
3464
3465	/*
3466	 * Start the buf clean daemons as children threads.
3467	 */
3468	for (i = 0 ; i < buf_domains; i++) {
3469		int error;
3470
3471		error = kthread_add((void (*)(void *))bufspace_daemon,
3472		    &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3473		if (error)
3474			panic("error %d spawning bufspace daemon", error);
3475	}
3476
3477	/*
3478	 * This process is allowed to take the buffer cache to the limit
3479	 */
3480	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3481	mtx_lock(&bdlock);
3482	while (!bd_shutdown) {
3483		bd_request = 0;
3484		mtx_unlock(&bdlock);
3485
3486		/*
3487		 * Save speedupreq for this pass and reset to capture new
3488		 * requests.
3489		 */
3490		speedupreq = bd_speedupreq;
3491		bd_speedupreq = 0;
3492
3493		/*
3494		 * Flush each domain sequentially according to its level and
3495		 * the speedup request.
3496		 */
3497		for (i = 0; i < buf_domains; i++) {
3498			bd = &bdomain[i];
3499			if (speedupreq)
3500				lodirty = bd->bd_numdirtybuffers / 2;
3501			else
3502				lodirty = bd->bd_lodirtybuffers;
3503			while (bd->bd_numdirtybuffers > lodirty) {
3504				if (buf_flush(NULL, bd,
3505				    bd->bd_numdirtybuffers - lodirty) == 0)
3506					break;
3507				kern_yield(PRI_USER);
3508			}
3509		}
3510
3511		/*
3512		 * Only clear bd_request if we have reached our low water
3513		 * mark.  The buf_daemon normally waits 1 second and
3514		 * then incrementally flushes any dirty buffers that have
3515		 * built up, within reason.
3516		 *
3517		 * If we were unable to hit our low water mark and couldn't
3518		 * find any flushable buffers, we sleep for a short period
3519		 * to avoid endless loops on unlockable buffers.
3520		 */
3521		mtx_lock(&bdlock);
3522		if (bd_shutdown)
3523			break;
3524		if (BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
3525			/*
3526			 * We reached our low water mark, reset the
3527			 * request and sleep until we are needed again.
3528			 * The sleep is just so the suspend code works.
3529			 */
3530			bd_request = 0;
3531			/*
3532			 * Do an extra wakeup in case dirty threshold
3533			 * changed via sysctl and the explicit transition
3534			 * out of shortfall was missed.
3535			 */
3536			bdirtywakeup();
3537			if (runningbufspace <= lorunningspace)
3538				runningwakeup();
3539			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3540		} else {
3541			/*
3542			 * We couldn't find any flushable dirty buffers but
3543			 * still have too many dirty buffers, we
3544			 * have to sleep and try again.  (rare)
3545			 */
3546			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3547		}
3548	}
3549	wakeup(&bd_shutdown);
3550	mtx_unlock(&bdlock);
3551	kthread_exit();
3552}
3553
3554/*
3555 *	flushbufqueues:
3556 *
3557 *	Try to flush a buffer in the dirty queue.  We must be careful to
3558 *	free up B_INVAL buffers instead of writing them, which NFS is
3559 *	particularly sensitive to.
3560 */
3561static int flushwithdeps = 0;
3562SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW | CTLFLAG_STATS,
3563    &flushwithdeps, 0,
3564    "Number of buffers flushed with dependencies that require rollbacks");
3565
3566static int
3567flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
3568    int flushdeps)
3569{
3570	struct bufqueue *bq;
3571	struct buf *sentinel;
3572	struct vnode *vp;
3573	struct mount *mp;
3574	struct buf *bp;
3575	int hasdeps;
3576	int flushed;
3577	int error;
3578	bool unlock;
3579
3580	flushed = 0;
3581	bq = &bd->bd_dirtyq;
3582	bp = NULL;
3583	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3584	sentinel->b_qindex = QUEUE_SENTINEL;
3585	BQ_LOCK(bq);
3586	TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3587	BQ_UNLOCK(bq);
3588	while (flushed != target) {
3589		maybe_yield();
3590		BQ_LOCK(bq);
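		/*
		 * Move the sentinel just past the next buffer so that
		 * the scan resumes after it on the following iteration.
		 */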
3591		bp = TAILQ_NEXT(sentinel, b_freelist);
3592		if (bp != NULL) {
3593			TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3594			TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3595			    b_freelist);
3596		} else {
3597			BQ_UNLOCK(bq);
3598			break;
3599		}
3600		/*
3601		 * Skip sentinels inserted by other invocations of
3602		 * flushbufqueues(), taking care to not reorder them.
3603		 *
3604		 * Only flush the buffers that belong to the
3605		 * vnode locked by the curthread.
3606		 */
3607		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3608		    bp->b_vp != lvp)) {
3609			BQ_UNLOCK(bq);
3610			continue;
3611		}
3612		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3613		BQ_UNLOCK(bq);
3614		if (error != 0)
3615			continue;
3616
3617		/*
3618		 * BKGRDINPROG can only be set with the buf and bufobj
3619		 * locks both held.  We tolerate a race to clear it here.
3620		 */
3621		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3622		    (bp->b_flags & B_DELWRI) == 0) {
3623			BUF_UNLOCK(bp);
3624			continue;
3625		}
3626		if (bp->b_flags & B_INVAL) {
3627			bremfreef(bp);
3628			brelse(bp);
3629			flushed++;
3630			continue;
3631		}
3632
3633		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3634			if (flushdeps == 0) {
3635				BUF_UNLOCK(bp);
3636				continue;
3637			}
3638			hasdeps = 1;
3639		} else
3640			hasdeps = 0;
3641		/*
3642		 * We must hold the lock on a vnode before writing
3643		 * one of its buffers. Otherwise we may confuse, or
3644		 * in the case of a snapshot vnode, deadlock the
3645		 * system.
3646		 *
3647		 * The lock order here is the reverse of the normal
3648		 * order of vnode followed by buf lock.  This is ok because
3649		 * the NOWAIT will prevent deadlock.
3650		 */
3651		vp = bp->b_vp;
3652		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3653			BUF_UNLOCK(bp);
3654			continue;
3655		}
3656		if (lvp == NULL) {
3657			unlock = true;
3658			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3659		} else {
3660			ASSERT_VOP_LOCKED(vp, "getbuf");
3661			unlock = false;
3662			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3663			    vn_lock(vp, LK_TRYUPGRADE);
3664		}
3665		if (error == 0) {
3666			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3667			    bp, bp->b_vp, bp->b_flags);
3668			if (curproc == bufdaemonproc) {
3669				vfs_bio_awrite(bp);
3670			} else {
3671				bremfree(bp);
3672				bwrite(bp);
3673				counter_u64_add(notbufdflushes, 1);
3674			}
3675			vn_finished_write(mp);
3676			if (unlock)
3677				VOP_UNLOCK(vp);
3678			flushwithdeps += hasdeps;
3679			flushed++;
3680
3681			/*
3682			 * Sleeping on runningbufspace while holding
3683			 * vnode lock leads to deadlock.
3684			 */
3685			if (curproc == bufdaemonproc &&
3686			    runningbufspace > hirunningspace)
3687				waitrunningbufspace();
3688			continue;
3689		}
3690		vn_finished_write(mp);
3691		BUF_UNLOCK(bp);
3692	}
3693	BQ_LOCK(bq);
3694	TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3695	BQ_UNLOCK(bq);
3696	free(sentinel, M_TEMP);
3697	return (flushed);
3698}
3699
3700/*
3701 * Check to see if a block is currently memory resident.
3702 */
3703struct buf *
3704incore(struct bufobj *bo, daddr_t blkno)
3705{
3706	return (gbincore_unlocked(bo, blkno));
3707}
3708
3709/*
3710 * Returns true if no I/O is needed to access the
3711 * associated VM object.  This is like incore except
3712 * it also hunts around in the VM system for the data.
3713 */
3714bool
3715inmem(struct vnode *vp, daddr_t blkno)
3716{
3717	vm_object_t obj;
3718	vm_offset_t toff, tinc, size;
3719	vm_page_t m, n;
3720	vm_ooffset_t off;
3721	int valid;
3722
3723	ASSERT_VOP_LOCKED(vp, "inmem");
3724
3725	if (incore(&vp->v_bufobj, blkno))
3726		return (true);
3727	if (vp->v_mount == NULL)
3728		return (false);
3729	obj = vp->v_object;
3730	if (obj == NULL)
3731		return (false);
3732
3733	size = PAGE_SIZE;
3734	if (size > vp->v_mount->mnt_stat.f_iosize)
3735		size = vp->v_mount->mnt_stat.f_iosize;
3736	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3737
3738	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3739		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3740recheck:
3741		if (m == NULL)
3742			return (false);
3743
3744		tinc = size;
3745		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3746			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3747		/*
3748		 * Consider page validity only if page mapping didn't change
3749		 * during the check.
3750		 */
3751		valid = vm_page_is_valid(m,
3752		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
3753		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3754		if (m != n) {
3755			m = n;
3756			goto recheck;
3757		}
3758		if (!valid)
3759			return (false);
3760	}
3761	return (true);
3762}
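/*
 * Illustrative sketch (not part of the original code): a caller holding the
 * vnode lock can combine the two checks above to decide whether issuing a
 * read makes sense.  "lbn" and "want_read" are hypothetical.
 *
 *	if (incore(&vp->v_bufobj, lbn) == NULL && !inmem(vp, lbn))
 *		want_read = true;
 */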
3763
3764/*
3765 * Set the dirty range for a buffer based on the status of the dirty
3766 * bits in the pages comprising the buffer.  The range is limited
3767 * to the size of the buffer.
3768 *
3769 * Tell the VM system that the pages associated with this buffer
3770 * are clean.  This is used for delayed writes where the data is
3771 * going to go to disk eventually without additional VM intervention.
3772 *
3773 * Note that while we only really need to clean through to b_bcount, we
3774 * just go ahead and clean through to b_bufsize.
3775 */
3776static void
3777vfs_clean_pages_dirty_buf(struct buf *bp)
3778{
3779	vm_ooffset_t foff, noff, eoff;
3780	vm_page_t m;
3781	int i;
3782
3783	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3784		return;
3785
3786	foff = bp->b_offset;
3787	KASSERT(bp->b_offset != NOOFFSET,
3788	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3789
3790	vfs_busy_pages_acquire(bp);
3791	vfs_setdirty_range(bp);
3792	for (i = 0; i < bp->b_npages; i++) {
3793		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3794		eoff = noff;
3795		if (eoff > bp->b_offset + bp->b_bufsize)
3796			eoff = bp->b_offset + bp->b_bufsize;
3797		m = bp->b_pages[i];
3798		vfs_page_set_validclean(bp, foff, m);
3799		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3800		foff = noff;
3801	}
3802	vfs_busy_pages_release(bp);
3803}
3804
3805static void
3806vfs_setdirty_range(struct buf *bp)
3807{
3808	vm_offset_t boffset;
3809	vm_offset_t eoffset;
3810	int i;
3811
3812	/*
3813	 * test the pages to see if they have been modified directly
3814	 * by users through the VM system.
3815	 */
3816	for (i = 0; i < bp->b_npages; i++)
3817		vm_page_test_dirty(bp->b_pages[i]);
3818
3819	/*
3820	 * Calculate the encompassing dirty range, boffset and eoffset,
3821	 * (eoffset - boffset) bytes.
3822	 */
3823
3824	for (i = 0; i < bp->b_npages; i++) {
3825		if (bp->b_pages[i]->dirty)
3826			break;
3827	}
3828	boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3829
3830	for (i = bp->b_npages - 1; i >= 0; --i) {
3831		if (bp->b_pages[i]->dirty) {
3832			break;
3833		}
3834	}
3835	eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3836
3837	/*
3838	 * Fit it to the buffer.
3839	 */
3840
3841	if (eoffset > bp->b_bcount)
3842		eoffset = bp->b_bcount;
3843
3844	/*
3845	 * If we have a good dirty range, merge with the existing
3846	 * dirty range.
3847	 */
3848
3849	if (boffset < eoffset) {
3850		if (bp->b_dirtyoff > boffset)
3851			bp->b_dirtyoff = boffset;
3852		if (bp->b_dirtyend < eoffset)
3853			bp->b_dirtyend = eoffset;
3854	}
3855}
3856
3857/*
3858 * Allocate the KVA mapping for an existing buffer.
3859 * If an unmapped buffer is provided but a mapped buffer is requested, also
3860 * take care to properly set up the mappings between pages and KVA.
3861 */
3862static void
3863bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3864{
3865	int bsize, maxsize, need_mapping, need_kva;
3866	off_t offset;
3867
3868	need_mapping = bp->b_data == unmapped_buf &&
3869	    (gbflags & GB_UNMAPPED) == 0;
3870	need_kva = bp->b_kvabase == unmapped_buf &&
3871	    bp->b_data == unmapped_buf &&
3872	    (gbflags & GB_KVAALLOC) != 0;
3873	if (!need_mapping && !need_kva)
3874		return;
3875
3876	BUF_CHECK_UNMAPPED(bp);
3877
3878	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3879		/*
3880		 * Buffer is not mapped, but the KVA was already
3881		 * reserved at the time of the instantiation.  Use the
3882		 * allocated space.
3883		 */
3884		goto has_addr;
3885	}
3886
3887	/*
3888	 * Calculate the amount of address space we would reserve
3889	 * if the buffer were mapped.
3890	 */
3891	bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3892	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3893	offset = blkno * bsize;
3894	maxsize = size + (offset & PAGE_MASK);
3895	maxsize = imax(maxsize, bsize);
3896
3897	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3898		if ((gbflags & GB_NOWAIT_BD) != 0) {
3899			/*
3900			 * XXXKIB: defragmentation cannot
3901			 * succeed, not sure what else to do.
3902			 */
3903			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3904		}
3905		counter_u64_add(mappingrestarts, 1);
3906		bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
3907	}
3908has_addr:
3909	if (need_mapping) {
3910		/* b_offset is handled by bpmap_qenter. */
3911		bp->b_data = bp->b_kvabase;
3912		BUF_CHECK_MAPPED(bp);
3913		bpmap_qenter(bp);
3914	}
3915}
3916
3917struct buf *
3918getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3919    int flags)
3920{
3921	struct buf *bp;
3922	int error;
3923
3924	error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp);
3925	if (error != 0)
3926		return (NULL);
3927	return (bp);
3928}
3929
3930/*
3931 *	getblkx:
3932 *
3933 *	Get a block given a specified block and offset into a file/device.
3934 *	The buffer's B_DONE bit will be cleared on return, making it almost
3935 *	ready for an I/O initiation.  B_INVAL may or may not be set on
3936 *	return.  The caller should clear B_INVAL prior to initiating a
3937 *	READ.
3938 *
3939 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3940 *	an existing buffer.
3941 *
3942 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3943 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3944 *	and then cleared based on the backing VM.  If the previous buffer is
3945 *	non-0-sized but invalid, B_CACHE will be cleared.
3946 *
3947 *	If getblk() must create a new buffer, the new buffer is returned with
3948 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3949 *	case it is returned with B_INVAL clear and B_CACHE set based on the
3950 *	backing VM.
3951 *
3952 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3953 *	B_CACHE bit is clear.
3954 *
3955 *	What this means, basically, is that the caller should use B_CACHE to
3956 *	determine whether the buffer is fully valid or not and should clear
3957 *	B_INVAL prior to issuing a read.  If the caller intends to validate
3958 *	the buffer by loading its data area with something, the caller needs
3959 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3960 *	the caller should set B_CACHE ( as an optimization ), else the caller
3961 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3962 *	a write attempt or if it was a successful read.  If the caller
3963 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3964 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3965 *
3966 *	The blkno parameter is the logical block being requested. Normally
3967 *	the mapping of logical block number to disk block address is done
3968 *	by calling VOP_BMAP(). However, if the mapping is already known, the
3969 *	disk block address can be passed using the dblkno parameter. If the
3970 *	disk block address is not known, then the same value should be passed
3971 *	for blkno and dblkno.
3972 */
3973int
3974getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag,
3975    int slptimeo, int flags, struct buf **bpp)
3976{
3977	struct buf *bp;
3978	struct bufobj *bo;
3979	daddr_t d_blkno;
3980	int bsize, error, maxsize, vmio;
3981	off_t offset;
3982
3983	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3984	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3985	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3986	if (vp->v_type != VCHR)
3987		ASSERT_VOP_LOCKED(vp, "getblk");
3988	if (size > maxbcachebuf) {
3989		printf("getblkx: size(%d) > maxbcachebuf(%d)\n", size,
3990		    maxbcachebuf);
3991		return (EIO);
3992	}
3993	if (!unmapped_buf_allowed)
3994		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3995
3996	bo = &vp->v_bufobj;
3997	d_blkno = dblkno;
3998
3999	/* Attempt lockless lookup first. */
4000	bp = gbincore_unlocked(bo, blkno);
4001	if (bp == NULL) {
4002		/*
4003		 * With GB_NOCREAT we must be certain the buffer does not exist,
4004		 * as it may have been reassigned during the unlocked lookup.
4005		 */
4006		if ((flags & GB_NOCREAT) != 0)
4007			goto loop;
4008		goto newbuf_unlocked;
4009	}
4010
4011	error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0,
4012	    0);
4013	if (error != 0) {
4014		KASSERT(error == EBUSY,
4015		    ("getblk: unexpected error %d from buf try-lock", error));
4016		/*
4017		 * We failed a buf try-lock.
4018		 *
4019		 * With GB_LOCK_NOWAIT, just return, rather than taking the
4020		 * bufobj interlock and trying again, since we would probably
4021		 * fail again anyway.  This is okay even if the buf's identity
4022		 * changed and we contended on the wrong lock, as changing
4023		 * identity itself requires the buf lock, and we could have
4024		 * contended on the right lock.
4025		 */
4026		if ((flags & GB_LOCK_NOWAIT) != 0)
4027			return (error);
4028		goto loop;
4029	}
4030
4031	/* Verify the buf's identity has not changed since the lookup. */
4032	if (bp->b_bufobj == bo && bp->b_lblkno == blkno)
4033		goto foundbuf_fastpath;
4034
4035	/* It changed, fallback to locked lookup. */
4036	BUF_UNLOCK_RAW(bp);
4037
4038	/* As above, with GB_LOCK_NOWAIT, just return. */
4039	if ((flags & GB_LOCK_NOWAIT) != 0)
4040		return (EBUSY);
4041
4042loop:
4043	BO_RLOCK(bo);
4044	bp = gbincore(bo, blkno);
4045	if (bp != NULL) {
4046		int lockflags;
4047
4048		/*
4049		 * Buffer is in-core.  If the buffer is not busy nor managed,
4050		 * it must be on a queue.
4051		 */
4052		lockflags = LK_EXCLUSIVE | LK_INTERLOCK |
4053		    ((flags & GB_LOCK_NOWAIT) != 0 ? LK_NOWAIT : LK_SLEEPFAIL);
4054#ifdef WITNESS
4055		lockflags |= (flags & GB_NOWITNESS) != 0 ? LK_NOWITNESS : 0;
4056#endif
4057
4058		error = BUF_TIMELOCK(bp, lockflags,
4059		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
4060
4061		/*
4062		 * If we slept and got the lock we have to restart in case
4063		 * the buffer changed identities.
4064		 */
4065		if (error == ENOLCK)
4066			goto loop;
4067		/* We timed out or were interrupted. */
4068		else if (error != 0)
4069			return (error);
4070
4071foundbuf_fastpath:
4072		/* If recursed, assume caller knows the rules. */
4073		if (BUF_LOCKRECURSED(bp))
4074			goto end;
4075
4076		/*
4077		 * The buffer is locked.  B_CACHE is cleared if the buffer is
4078		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
4079		 * and for a VMIO buffer B_CACHE is adjusted according to the
4080		 * backing VM cache.
4081		 */
4082		if (bp->b_flags & B_INVAL)
4083			bp->b_flags &= ~B_CACHE;
4084		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
4085			bp->b_flags |= B_CACHE;
4086		if (bp->b_flags & B_MANAGED)
4087			MPASS(bp->b_qindex == QUEUE_NONE);
4088		else
4089			bremfree(bp);
4090
4091		/*
4092		 * Check for size inconsistencies in the non-VMIO case.
4093		 */
4094		if (bp->b_bcount != size) {
4095			if ((bp->b_flags & B_VMIO) == 0 ||
4096			    (size > bp->b_kvasize)) {
4097				if (bp->b_flags & B_DELWRI) {
4098					bp->b_flags |= B_NOCACHE;
4099					bwrite(bp);
4100				} else {
4101					if (LIST_EMPTY(&bp->b_dep)) {
4102						bp->b_flags |= B_RELBUF;
4103						brelse(bp);
4104					} else {
4105						bp->b_flags |= B_NOCACHE;
4106						bwrite(bp);
4107					}
4108				}
4109				goto loop;
4110			}
4111		}
4112
4113		/*
4114		 * Handle the case of unmapped buffer which should
4115		 * become mapped, or the buffer for which KVA
4116		 * reservation is requested.
4117		 */
4118		bp_unmapped_get_kva(bp, blkno, size, flags);
4119
4120		/*
4121		 * If the size is inconsistent in the VMIO case, we can resize
4122		 * the buffer.  This might lead to B_CACHE getting set or
4123		 * cleared.  If the size has not changed, B_CACHE remains
4124		 * unchanged from its previous state.
4125		 */
4126		allocbuf(bp, size);
4127
4128		KASSERT(bp->b_offset != NOOFFSET,
4129		    ("getblk: no buffer offset"));
4130
4131		/*
4132		 * A buffer with B_DELWRI set and B_CACHE clear must
4133		 * be committed before we can return the buffer in
4134		 * order to prevent the caller from issuing a read
4135		 * ( due to B_CACHE not being set ) and overwriting
4136		 * it.
4137		 *
4138		 * Most callers, including NFS and FFS, need this to
4139		 * operate properly either because they assume they
4140		 * can issue a read if B_CACHE is not set, or because
4141		 * ( for example ) an uncached B_DELWRI might loop due
4142		 * to softupdates re-dirtying the buffer.  In the latter
4143		 * case, B_CACHE is set after the first write completes,
4144		 * preventing further loops.
4145		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
4146		 * above while extending the buffer, we cannot allow the
4147		 * buffer to remain with B_CACHE set after the write
4148		 * completes or it will represent a corrupt state.  To
4149		 * deal with this we set B_NOCACHE to scrap the buffer
4150		 * after the write.
4151		 *
4152		 * We might be able to do something fancy, like setting
4153		 * B_CACHE in bwrite() except if B_DELWRI is already set,
4154		 * so the below call doesn't set B_CACHE, but that gets real
4155		 * confusing.  This is much easier.
4156		 */
4157
4158		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
4159			bp->b_flags |= B_NOCACHE;
4160			bwrite(bp);
4161			goto loop;
4162		}
4163		bp->b_flags &= ~B_DONE;
4164	} else {
4165		/*
4166		 * Buffer is not in-core, create new buffer.  The buffer
4167		 * returned by getnewbuf() is locked.  Note that the returned
4168		 * buffer is also considered valid (not marked B_INVAL).
4169		 */
4170		BO_RUNLOCK(bo);
4171newbuf_unlocked:
4172		/*
4173		 * If the user does not want us to create the buffer, bail out
4174		 * here.
4175		 */
4176		if (flags & GB_NOCREAT)
4177			return (EEXIST);
4178
4179		bsize = vn_isdisk(vp) ? DEV_BSIZE : bo->bo_bsize;
4180		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
4181		offset = blkno * bsize;
4182		vmio = vp->v_object != NULL;
4183		if (vmio) {
4184			maxsize = size + (offset & PAGE_MASK);
4185			if (maxsize > maxbcachebuf) {
4186				printf(
4187			    "getblkx: maxsize(%d) > maxbcachebuf(%d)\n",
4188				    maxsize, maxbcachebuf);
4189				return (EIO);
4190			}
4191		} else {
4192			maxsize = size;
4193			/* Do not allow non-VMIO unmapped buffers. */
4194			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
4195		}
4196		maxsize = imax(maxsize, bsize);
4197		if ((flags & GB_NOSPARSE) != 0 && vmio &&
4198		    !vn_isdisk(vp)) {
4199			error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
4200			KASSERT(error != EOPNOTSUPP,
4201			    ("GB_NOSPARSE from fs not supporting bmap, vp %p",
4202			    vp));
4203			if (error != 0)
4204				return (error);
4205			if (d_blkno == -1)
4206				return (EJUSTRETURN);
4207		}
4208
4209		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
4210		if (bp == NULL) {
4211			if (slpflag || slptimeo)
4212				return (ETIMEDOUT);
4213			/*
4214			 * XXX This is here until the sleep path is diagnosed
4215			 * enough to work under very low memory conditions.
4216			 *
4217			 * There's an issue on low-memory, 4BSD+non-preempt
4218			 * systems (e.g., MIPS routers with 32MB RAM) where
4219			 * buffer exhaustion occurs without sleeping for buffer
4220			 * reclamation.  This just sticks in a loop that
4221			 * constantly attempts to allocate a buffer, which
4222			 * hits exhaustion and tries to wake up bufdaemon.
4223			 * The reclamation never happens because we never yield.
4224			 *
4225			 * The real solution is to identify and fix these cases
4226			 * so we aren't effectively busy-waiting in a loop
4227			 * until the reclamation path has cycles to run.
4228			 */
4229			kern_yield(PRI_USER);
4230			goto loop;
4231		}
4232
4233		/*
4234		 * This code is used to make sure that a buffer is not
4235		 * created while the getnewbuf routine is blocked.
4236		 * This can be a problem whether the vnode is locked or not.
4237		 * If the buffer is created out from under us, we have to
4238		 * throw away the one we just created.
4239		 *
4240		 * Note: this must occur before we associate the buffer
4241		 * with the vp especially considering limitations in
4242		 * with the vp, especially considering limitations in
4243		 * lblkno's.
4244		 */
4245		BO_LOCK(bo);
4246		if (gbincore(bo, blkno)) {
4247			BO_UNLOCK(bo);
4248			bp->b_flags |= B_INVAL;
4249			bufspace_release(bufdomain(bp), maxsize);
4250			brelse(bp);
4251			goto loop;
4252		}
4253
4254		/*
4255		 * Insert the buffer into the hash, so that it can
4256		 * be found by incore.
4257		 */
4258		bp->b_lblkno = blkno;
4259		bp->b_blkno = d_blkno;
4260		bp->b_offset = offset;
4261		bgetvp(vp, bp);
4262		BO_UNLOCK(bo);
4263
4264		/*
4265		 * Set the B_VMIO bit and grow the buffer with allocbuf().  Since
4266		 * the buffer size starts out as 0, B_CACHE will be set by
4267		 * allocbuf() for the VMIO case prior to it testing the
4268		 * backing store for validity.
4269		 */
4270
4271		if (vmio) {
4272			bp->b_flags |= B_VMIO;
4273			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
4274			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
4275			    bp, vp->v_object, bp->b_bufobj->bo_object));
4276		} else {
4277			bp->b_flags &= ~B_VMIO;
4278			KASSERT(bp->b_bufobj->bo_object == NULL,
4279			    ("ARGH! has b_bufobj->bo_object %p %p\n",
4280			    bp, bp->b_bufobj->bo_object));
4281			BUF_CHECK_MAPPED(bp);
4282		}
4283
4284		allocbuf(bp, size);
4285		bufspace_release(bufdomain(bp), maxsize);
4286		bp->b_flags &= ~B_DONE;
4287	}
4288	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
4289end:
4290	buf_track(bp, __func__);
4291	KASSERT(bp->b_bufobj == bo,
4292	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
4293	*bpp = bp;
4294	return (0);
4295}
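/*
 * Illustrative sketch, assuming a locked vnode "vp" and a hypothetical
 * logical block "lbn" of size "bsize": most consumers reach getblk()/getblkx()
 * indirectly through bread(), which issues the read when B_CACHE is clear,
 * or call getblk() directly when they intend to overwrite the whole block.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	(inspect or modify bp->b_data)
 *	bdwrite(bp);	(or brelse(bp) if the buffer was not dirtied)
 */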
4296
4297/*
4298 * Get an empty, disassociated buffer of given size.  The buffer is initially
4299 * set to B_INVAL.
4300 */
4301struct buf *
4302geteblk(int size, int flags)
4303{
4304	struct buf *bp;
4305	int maxsize;
4306
4307	maxsize = (size + BKVAMASK) & ~BKVAMASK;
4308	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
4309		if ((flags & GB_NOWAIT_BD) &&
4310		    (curthread->td_pflags & TDP_BUFNEED) != 0)
4311			return (NULL);
4312	}
4313	allocbuf(bp, size);
4314	bufspace_release(bufdomain(bp), maxsize);
4315	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
4316	return (bp);
4317}
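/*
 * Illustrative sketch: geteblk() is useful for transient scratch space that
 * needs buffer KVA but no vnode association.  The size below is arbitrary.
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(MAXBSIZE, 0);
 *	(use bp->b_data as scratch)
 *	brelse(bp);	(the buffer is B_INVAL, so it is recycled, not cached)
 */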
4318
4319/*
4320 * Truncate the backing store for a non-vmio buffer.
4321 */
4322static void
4323vfs_nonvmio_truncate(struct buf *bp, int newbsize)
4324{
4325
4326	if (bp->b_flags & B_MALLOC) {
4327		/*
4328		 * malloced buffers are not shrunk
4329		 */
4330		if (newbsize == 0) {
4331			bufmallocadjust(bp, 0);
4332			free(bp->b_data, M_BIOBUF);
4333			bp->b_data = bp->b_kvabase;
4334			bp->b_flags &= ~B_MALLOC;
4335		}
4336		return;
4337	}
4338	vm_hold_free_pages(bp, newbsize);
4339	bufspace_adjust(bp, newbsize);
4340}
4341
4342/*
4343 * Extend the backing for a non-VMIO buffer.
4344 */
4345static void
4346vfs_nonvmio_extend(struct buf *bp, int newbsize)
4347{
4348	caddr_t origbuf;
4349	int origbufsize;
4350
4351	/*
4352	 * We only use malloced memory on the first allocation,
4353	 * and revert to page-allocated memory when the buffer
4354	 * grows.
4355	 *
4356	 * There is a potential SMP race here that could lead
4357	 * to bufmallocspace slightly passing the max.  It
4358	 * is probably extremely rare and not worth worrying
4359	 * over.
4360	 */
4361	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4362	    bufmallocspace < maxbufmallocspace) {
4363		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4364		bp->b_flags |= B_MALLOC;
4365		bufmallocadjust(bp, newbsize);
4366		return;
4367	}
4368
4369	/*
4370	 * If the buffer is growing on its other-than-first
4371	 * allocation then we revert to the page-allocation
4372	 * scheme.
4373	 */
4374	origbuf = NULL;
4375	origbufsize = 0;
4376	if (bp->b_flags & B_MALLOC) {
4377		origbuf = bp->b_data;
4378		origbufsize = bp->b_bufsize;
4379		bp->b_data = bp->b_kvabase;
4380		bufmallocadjust(bp, 0);
4381		bp->b_flags &= ~B_MALLOC;
4382		newbsize = round_page(newbsize);
4383	}
4384	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4385	    (vm_offset_t) bp->b_data + newbsize);
4386	if (origbuf != NULL) {
4387		bcopy(origbuf, bp->b_data, origbufsize);
4388		free(origbuf, M_BIOBUF);
4389	}
4390	bufspace_adjust(bp, newbsize);
4391}
4392
4393/*
4394 * This code constructs the buffer memory from either anonymous system
4395 * memory (in the case of non-VMIO operations) or from an associated
4396 * VM object (in the case of VMIO operations).  This code is able to
4397 * resize a buffer up or down.
4398 *
4399 * Note that this code is tricky, and has many complications to resolve
4400 * deadlock or inconsistent data situations.  Tread lightly!!!
4401 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4402 * the caller.  Calling this code willy nilly can result in the loss of data.
4403 *
4404 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
4405 * B_CACHE for the non-VMIO case.
4406 */
4407int
4408allocbuf(struct buf *bp, int size)
4409{
4410	int newbsize;
4411
4412	if (bp->b_bcount == size)
4413		return (1);
4414
4415	KASSERT(bp->b_kvasize == 0 || bp->b_kvasize >= size,
4416	    ("allocbuf: buffer too small %p %#x %#x",
4417	    bp, bp->b_kvasize, size));
4418
4419	newbsize = roundup2(size, DEV_BSIZE);
4420	if ((bp->b_flags & B_VMIO) == 0) {
4421		if ((bp->b_flags & B_MALLOC) == 0)
4422			newbsize = round_page(newbsize);
4423		/*
4424		 * Just get anonymous memory from the kernel.  Don't
4425		 * mess with B_CACHE.
4426		 */
4427		if (newbsize < bp->b_bufsize)
4428			vfs_nonvmio_truncate(bp, newbsize);
4429		else if (newbsize > bp->b_bufsize)
4430			vfs_nonvmio_extend(bp, newbsize);
4431	} else {
4432		int desiredpages;
4433
4434		desiredpages = size == 0 ? 0 :
4435		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4436
4437		KASSERT((bp->b_flags & B_MALLOC) == 0,
4438		    ("allocbuf: VMIO buffer can't be malloced %p", bp));
4439
4440		/*
4441		 * Set B_CACHE initially if buffer is 0 length or will become
4442		 * 0-length.
4443		 */
4444		if (size == 0 || bp->b_bufsize == 0)
4445			bp->b_flags |= B_CACHE;
4446
4447		if (newbsize < bp->b_bufsize)
4448			vfs_vmio_truncate(bp, desiredpages);
4449		/* XXX This looks as if it should be newbsize > b_bufsize */
4450		else if (size > bp->b_bcount)
4451			vfs_vmio_extend(bp, desiredpages, size);
4452		bufspace_adjust(bp, newbsize);
4453	}
4454	bp->b_bcount = size;		/* requested buffer size. */
4455	return (1);
4456}
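/*
 * Illustrative sketch of a common allocbuf() use, loosely following what a
 * filesystem does when it grows the last, partial block of a file from
 * "osize" to "nsize" (both hypothetical, mapped buffer assumed); note that
 * the B_CACHE/B_DELWRI caveats above still apply.
 *
 *	error = bread(vp, lbn, osize, NOCRED, &bp);
 *	if (error == 0) {
 *		allocbuf(bp, nsize);
 *		bzero(bp->b_data + osize, nsize - osize);
 *		bdwrite(bp);
 *	}
 */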
4457
4458extern int inflight_transient_maps;
4459
4460static struct bio_queue nondump_bios;
4461
4462void
4463biodone(struct bio *bp)
4464{
4465	struct mtx *mtxp;
4466	void (*done)(struct bio *);
4467	vm_offset_t start, end;
4468
4469	biotrack(bp, __func__);
4470
4471	/*
4472	 * Avoid completing I/O when dumping after a panic since that may
4473	 * result in a deadlock in the filesystem or pager code.  Note that
4474	 * this doesn't affect dumps that were started manually since we aim
4475	 * to keep the system usable after it has been resumed.
4476	 */
4477	if (__predict_false(dumping && SCHEDULER_STOPPED())) {
4478		TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
4479		return;
4480	}
4481	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
4482		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4483		bp->bio_flags |= BIO_UNMAPPED;
4484		start = trunc_page((vm_offset_t)bp->bio_data);
4485		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4486		bp->bio_data = unmapped_buf;
4487		pmap_qremove(start, atop(end - start));
4488		vmem_free(transient_arena, start, end - start);
4489		atomic_add_int(&inflight_transient_maps, -1);
4490	}
4491	done = bp->bio_done;
4492	/*
4493	 * The check for done == biodone is to allow biodone to be
4494	 * used as a bio_done routine.
4495	 */
4496	if (done == NULL || done == biodone) {
4497		mtxp = mtx_pool_find(mtxpool_sleep, bp);
4498		mtx_lock(mtxp);
4499		bp->bio_flags |= BIO_DONE;
4500		wakeup(bp);
4501		mtx_unlock(mtxp);
4502	} else
4503		done(bp);
4504}
4505
4506/*
4507 * Wait for a BIO to finish.
4508 */
4509int
4510biowait(struct bio *bp, const char *wmesg)
4511{
4512	struct mtx *mtxp;
4513
4514	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4515	mtx_lock(mtxp);
4516	while ((bp->bio_flags & BIO_DONE) == 0)
4517		msleep(bp, mtxp, PRIBIO, wmesg, 0);
4518	mtx_unlock(mtxp);
4519	if (bp->bio_error != 0)
4520		return (bp->bio_error);
4521	if (!(bp->bio_flags & BIO_ERROR))
4522		return (0);
4523	return (EIO);
4524}
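/*
 * Illustrative sketch of the bio_done == NULL convention handled by
 * biodone() above.  "my_strategy" stands in for whatever routine hands the
 * bio to the lower layer and is hypothetical; the other calls are the
 * standard bio interfaces.
 *
 *	struct bio *bip;
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = BIO_READ;
 *	bip->bio_offset = offset;
 *	bip->bio_length = length;
 *	bip->bio_data = buffer;
 *	bip->bio_done = NULL;		(biodone() will just wakeup() waiters)
 *	my_strategy(bip);
 *	error = biowait(bip, "exbio");
 *	g_destroy_bio(bip);
 */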
4525
4526void
4527biofinish(struct bio *bp, struct devstat *stat, int error)
4528{
4529
4530	if (error) {
4531		bp->bio_error = error;
4532		bp->bio_flags |= BIO_ERROR;
4533	}
4534	if (stat != NULL)
4535		devstat_end_transaction_bio(stat, bp);
4536	biodone(bp);
4537}
4538
4539#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4540void
4541biotrack_buf(struct bio *bp, const char *location)
4542{
4543
4544	buf_track(bp->bio_track_bp, location);
4545}
4546#endif
4547
4548/*
4549 *	bufwait:
4550 *
4551 *	Wait for buffer I/O completion, returning error status.  The buffer
4552 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4553 *	error and cleared.
4554 */
4555int
4556bufwait(struct buf *bp)
4557{
4558	if (bp->b_iocmd == BIO_READ)
4559		bwait(bp, PRIBIO, "biord");
4560	else
4561		bwait(bp, PRIBIO, "biowr");
4562	if (bp->b_flags & B_EINTR) {
4563		bp->b_flags &= ~B_EINTR;
4564		return (EINTR);
4565	}
4566	if (bp->b_ioflags & BIO_ERROR) {
4567		return (bp->b_error ? bp->b_error : EIO);
4568	} else {
4569		return (0);
4570	}
4571}
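/*
 * Illustrative sketch of the synchronous read path that bufwait() serves;
 * this is roughly what bread() does internally, with credentials, error
 * handling and accounting elided.  "vp", "lbn" and "bsize" are hypothetical.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */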
4572
4573/*
4574 *	bufdone:
4575 *
4576 *	Finish I/O on a buffer, optionally calling a completion function.
4577 *	This is usually called from an interrupt so process blocking is
4578 *	not allowed.
4579 *
4580 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4581 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4582 *	assuming B_INVAL is clear.
4583 *
4584 *	For the VMIO case, we set B_CACHE if the op was a read and no
4585 *	read error occurred, or if the op was a write.  B_CACHE is never
4586 *	set if the buffer is invalid or otherwise uncacheable.
4587 *
4588 *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
4589 *	initiator to leave B_INVAL set to brelse the buffer out of existence
4590 *	in the biodone routine.
4591 */
4592void
4593bufdone(struct buf *bp)
4594{
4595	struct bufobj *dropobj;
4596	void    (*biodone)(struct buf *);
4597
4598	buf_track(bp, __func__);
4599	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4600	dropobj = NULL;
4601
4602	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4603
4604	runningbufwakeup(bp);
4605	if (bp->b_iocmd == BIO_WRITE)
4606		dropobj = bp->b_bufobj;
4607	/* call optional completion function if requested */
4608	if (bp->b_iodone != NULL) {
4609		biodone = bp->b_iodone;
4610		bp->b_iodone = NULL;
4611		(*biodone) (bp);
4612		if (dropobj)
4613			bufobj_wdrop(dropobj);
4614		return;
4615	}
4616	if (bp->b_flags & B_VMIO) {
4617		/*
4618		 * Set B_CACHE if the op was a normal read and no error
4619		 * occurred.  B_CACHE is set for writes in the b*write()
4620		 * routines.
4621		 */
4622		if (bp->b_iocmd == BIO_READ &&
4623		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4624		    !(bp->b_ioflags & BIO_ERROR))
4625			bp->b_flags |= B_CACHE;
4626		vfs_vmio_iodone(bp);
4627	}
4628	if (!LIST_EMPTY(&bp->b_dep))
4629		buf_complete(bp);
4630	if ((bp->b_flags & B_CKHASH) != 0) {
4631		KASSERT(bp->b_iocmd == BIO_READ,
4632		    ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4633		KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4634		(*bp->b_ckhashcalc)(bp);
4635	}
4636	/*
4637	 * For asynchronous completions, release the buffer now. The brelse
4638	 * will do a wakeup there if necessary - so no need to do a wakeup
4639	 * here in the async case. The sync case always needs to do a wakeup.
4640	 */
4641	if (bp->b_flags & B_ASYNC) {
4642		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4643		    (bp->b_ioflags & BIO_ERROR))
4644			brelse(bp);
4645		else
4646			bqrelse(bp);
4647	} else
4648		bdone(bp);
4649	if (dropobj)
4650		bufobj_wdrop(dropobj);
4651}
4652
4653/*
4654 * This routine is called in lieu of iodone in the case of
4655 * incomplete I/O.  This keeps the busy status for pages
4656 * consistent.
4657 */
4658void
4659vfs_unbusy_pages(struct buf *bp)
4660{
4661	int i;
4662	vm_object_t obj;
4663	vm_page_t m;
4664
4665	runningbufwakeup(bp);
4666	if (!(bp->b_flags & B_VMIO))
4667		return;
4668
4669	obj = bp->b_bufobj->bo_object;
4670	for (i = 0; i < bp->b_npages; i++) {
4671		m = bp->b_pages[i];
4672		if (m == bogus_page) {
4673			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4674			if (!m)
4675				panic("vfs_unbusy_pages: page missing\n");
4676			bp->b_pages[i] = m;
4677			if (buf_mapped(bp)) {
4678				BUF_CHECK_MAPPED(bp);
4679				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4680				    bp->b_pages, bp->b_npages);
4681			} else
4682				BUF_CHECK_UNMAPPED(bp);
4683		}
4684		vm_page_sunbusy(m);
4685	}
4686	vm_object_pip_wakeupn(obj, bp->b_npages);
4687}
4688
4689/*
4690 * vfs_page_set_valid:
4691 *
4692 *	Set the valid bits in a page based on the supplied offset.   The
4693 *	range is restricted to the buffer's size.
4694 *
4695 *	This routine is typically called after a read completes.
4696 */
4697static void
4698vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4699{
4700	vm_ooffset_t eoff;
4701
4702	/*
4703	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4704	 * page boundary and eoff is not greater than the end of the buffer.
4705	 * The end of the buffer, in this case, is our file EOF, not the
4706	 * allocation size of the buffer.
4707	 */
4708	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4709	if (eoff > bp->b_offset + bp->b_bcount)
4710		eoff = bp->b_offset + bp->b_bcount;
4711
4712	/*
4713	 * Set valid range.  This is typically the entire buffer and thus the
4714	 * entire page.
4715	 */
4716	if (eoff > off)
4717		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4718}
4719
4720/*
4721 * vfs_page_set_validclean:
4722 *
4723 *	Set the valid bits and clear the dirty bits in a page based on the
4724 *	supplied offset.   The range is restricted to the buffer's size.
4725 */
4726static void
4727vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4728{
4729	vm_ooffset_t soff, eoff;
4730
4731	/*
4732	 * Start and end offsets in buffer.  eoff - soff may not cross a
4733	 * page boundary or cross the end of the buffer.  The end of the
4734	 * buffer, in this case, is our file EOF, not the allocation size
4735	 * of the buffer.
4736	 */
4737	soff = off;
4738	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4739	if (eoff > bp->b_offset + bp->b_bcount)
4740		eoff = bp->b_offset + bp->b_bcount;
4741
4742	/*
4743	 * Set valid range.  This is typically the entire buffer and thus the
4744	 * entire page.
4745	 */
4746	if (eoff > soff) {
4747		vm_page_set_validclean(
4748		    m,
4749		   (vm_offset_t) (soff & PAGE_MASK),
4750		   (vm_offset_t) (eoff - soff)
4751		);
4752	}
4753}
4754
4755/*
4756 * Acquire a shared busy on all pages in the buf.
4757 */
4758void
4759vfs_busy_pages_acquire(struct buf *bp)
4760{
4761	int i;
4762
4763	for (i = 0; i < bp->b_npages; i++)
4764		vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
4765}
4766
4767void
4768vfs_busy_pages_release(struct buf *bp)
4769{
4770	int i;
4771
4772	for (i = 0; i < bp->b_npages; i++)
4773		vm_page_sunbusy(bp->b_pages[i]);
4774}
4775
4776/*
4777 * This routine is called before a device strategy routine.
4778 * It is used to tell the VM system that paging I/O is in
4779 * progress, and to treat the pages associated with the buffer
4780 * almost as if they were exclusive busy.  The object's paging_in_progress
4781 * count is also handled to make sure that the object doesn't become
4782 * inconsistent.
4783 *
4784 * Since I/O has not been initiated yet, certain buffer flags
4785 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4786 * and should be ignored.
4787 */
4788void
4789vfs_busy_pages(struct buf *bp, int clear_modify)
4790{
4791	vm_object_t obj;
4792	vm_ooffset_t foff;
4793	vm_page_t m;
4794	int i;
4795	bool bogus;
4796
4797	if (!(bp->b_flags & B_VMIO))
4798		return;
4799
4800	obj = bp->b_bufobj->bo_object;
4801	foff = bp->b_offset;
4802	KASSERT(bp->b_offset != NOOFFSET,
4803	    ("vfs_busy_pages: no buffer offset"));
4804	if ((bp->b_flags & B_CLUSTER) == 0) {
4805		vm_object_pip_add(obj, bp->b_npages);
4806		vfs_busy_pages_acquire(bp);
4807	}
4808	if (bp->b_bufsize != 0)
4809		vfs_setdirty_range(bp);
4810	bogus = false;
4811	for (i = 0; i < bp->b_npages; i++) {
4812		m = bp->b_pages[i];
4813		vm_page_assert_sbusied(m);
4814
4815		/*
4816		 * When readying a buffer for a read ( i.e.,
4817		 * clear_modify == 0 ), it is important to do
4818		 * bogus_page replacement for valid pages in
4819		 * partially instantiated buffers.  Partially
4820		 * instantiated buffers can, in turn, occur when
4821		 * reconstituting a buffer from its VM backing store
4822		 * base.  We only have to do this if B_CACHE is
4823		 * clear ( which causes the I/O to occur in the
4824		 * first place ).  The replacement prevents the read
4825		 * I/O from overwriting potentially dirty VM-backed
4826		 * pages.  XXX bogus page replacement is, uh, bogus.
4827		 * It may not work properly with small-block devices.
4828		 * We need to find a better way.
4829		 */
4830		if (clear_modify) {
4831			pmap_remove_write(m);
4832			vfs_page_set_validclean(bp, foff, m);
4833		} else if (vm_page_all_valid(m) &&
4834		    (bp->b_flags & B_CACHE) == 0) {
4835			bp->b_pages[i] = bogus_page;
4836			bogus = true;
4837		}
4838		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4839	}
4840	if (bogus && buf_mapped(bp)) {
4841		BUF_CHECK_MAPPED(bp);
4842		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4843		    bp->b_pages, bp->b_npages);
4844	}
4845}
4846
4847/*
4848 *	vfs_bio_set_valid:
4849 *
4850 *	Set the range within the buffer to valid.  The range is
4851 *	relative to the beginning of the buffer, b_offset.  Note that
4852 *	b_offset itself may be offset from the beginning of the first
4853 *	page.
4854 */
4855void
4856vfs_bio_set_valid(struct buf *bp, int base, int size)
4857{
4858	int i, n;
4859	vm_page_t m;
4860
4861	if (!(bp->b_flags & B_VMIO))
4862		return;
4863
4864	/*
4865	 * Fixup base to be relative to beginning of first page.
4866	 * Set initial n to be the maximum number of bytes in the
4867	 * first page that can be validated.
4868	 */
4869	base += (bp->b_offset & PAGE_MASK);
4870	n = PAGE_SIZE - (base & PAGE_MASK);
4871
4872	/*
4873	 * Busy may not be strictly necessary here because the pages are
4874	 * unlikely to be fully valid and the vnode lock will synchronize
4875	 * their access via getpages.  It is grabbed for consistency with
4876	 * other page validation.
4877	 */
4878	vfs_busy_pages_acquire(bp);
4879	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4880		m = bp->b_pages[i];
4881		if (n > size)
4882			n = size;
4883		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4884		base += n;
4885		size -= n;
4886		n = PAGE_SIZE;
4887	}
4888	vfs_busy_pages_release(bp);
4889}
4890
4891/*
4892 *	vfs_bio_clrbuf:
4893 *
4894 *	If the specified buffer is a non-VMIO buffer, clear the entire
4895 *	buffer.  If the specified buffer is a VMIO buffer, clear and
4896 *	validate only the previously invalid portions of the buffer.
4897 *	This routine essentially fakes an I/O, so we need to clear
4898 *	BIO_ERROR and B_INVAL.
4899 *
4900 *	Note that while we only theoretically need to clear through b_bcount,
4901 *	we go ahead and clear through b_bufsize.
4902 */
4903void
4904vfs_bio_clrbuf(struct buf *bp)
4905{
4906	int i, j, sa, ea, slide, zbits;
4907	vm_page_bits_t mask;
4908
4909	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4910		clrbuf(bp);
4911		return;
4912	}
4913	bp->b_flags &= ~B_INVAL;
4914	bp->b_ioflags &= ~BIO_ERROR;
4915	vfs_busy_pages_acquire(bp);
4916	sa = bp->b_offset & PAGE_MASK;
4917	slide = 0;
4918	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4919		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4920		ea = slide & PAGE_MASK;
4921		if (ea == 0)
4922			ea = PAGE_SIZE;
4923		if (bp->b_pages[i] == bogus_page)
4924			continue;
4925		j = sa / DEV_BSIZE;
4926		zbits = (sizeof(vm_page_bits_t) * NBBY) -
4927		    (ea - sa) / DEV_BSIZE;
4928		mask = (VM_PAGE_BITS_ALL >> zbits) << j;
4929		if ((bp->b_pages[i]->valid & mask) == mask)
4930			continue;
4931		if ((bp->b_pages[i]->valid & mask) == 0)
4932			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4933		else {
4934			for (; sa < ea; sa += DEV_BSIZE, j++) {
4935				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4936					pmap_zero_page_area(bp->b_pages[i],
4937					    sa, DEV_BSIZE);
4938				}
4939			}
4940		}
4941		vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE,
4942		    roundup2(ea - sa, DEV_BSIZE));
4943	}
4944	vfs_busy_pages_release(bp);
4945	bp->b_resid = 0;
4946}
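/*
 * Illustrative sketch: a filesystem that has just allocated fresh backing
 * store for a block it never read from disk typically validates the buffer
 * by zeroing instead of doing I/O.  The getblk() arguments and
 * "new_disk_blkno" are hypothetical.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0, 0);
 *	bp->b_blkno = new_disk_blkno;
 *	vfs_bio_clrbuf(bp);
 *	bdwrite(bp);
 */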
4947
4948void
4949vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4950{
4951	vm_page_t m;
4952	int i, n;
4953
4954	if (buf_mapped(bp)) {
4955		BUF_CHECK_MAPPED(bp);
4956		bzero(bp->b_data + base, size);
4957	} else {
4958		BUF_CHECK_UNMAPPED(bp);
4959		n = PAGE_SIZE - (base & PAGE_MASK);
4960		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4961			m = bp->b_pages[i];
4962			if (n > size)
4963				n = size;
4964			pmap_zero_page_area(m, base & PAGE_MASK, n);
4965			base += n;
4966			size -= n;
4967			n = PAGE_SIZE;
4968		}
4969	}
4970}
4971
4972/*
4973 * Update buffer flags based on I/O request parameters, optionally releasing the
4974 * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4975 * where they may be placed on a page queue (VMIO) or freed immediately (direct
4976 * I/O).  Otherwise the buffer is released to the cache.
4977 */
4978static void
4979b_io_dismiss(struct buf *bp, int ioflag, bool release)
4980{
4981
4982	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4983	    ("buf %p non-VMIO noreuse", bp));
4984
4985	if ((ioflag & IO_DIRECT) != 0)
4986		bp->b_flags |= B_DIRECT;
4987	if ((ioflag & IO_EXT) != 0)
4988		bp->b_xflags |= BX_ALTDATA;
4989	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4990		bp->b_flags |= B_RELBUF;
4991		if ((ioflag & IO_NOREUSE) != 0)
4992			bp->b_flags |= B_NOREUSE;
4993		if (release)
4994			brelse(bp);
4995	} else if (release)
4996		bqrelse(bp);
4997}
4998
4999void
5000vfs_bio_brelse(struct buf *bp, int ioflag)
5001{
5002
5003	b_io_dismiss(bp, ioflag, true);
5004}
5005
5006void
5007vfs_bio_set_flags(struct buf *bp, int ioflag)
5008{
5009
5010	b_io_dismiss(bp, ioflag, false);
5011}
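/*
 * Illustrative sketch: a VOP_READ loop usually lets vfs_bio_brelse()
 * translate the IO_* flags it was invoked with into the right buffer
 * disposition instead of open-coding B_RELBUF/B_NOREUSE handling.  The
 * surrounding loop, "blkoff" and "xfersize" are hypothetical and heavily
 * elided.
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error != 0)
 *		break;
 *	error = uiomove(bp->b_data + blkoff, xfersize, uio);
 *	vfs_bio_brelse(bp, ioflag);
 */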
5012
5013/*
5014 * vm_hold_load_pages and vm_hold_free_pages get pages into
5015 * a buffer's address space.  The pages are anonymous and are
5016 * not associated with a file object.
5017 */
5018static void
5019vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
5020{
5021	vm_offset_t pg;
5022	vm_page_t p;
5023	int index;
5024
5025	BUF_CHECK_MAPPED(bp);
5026
5027	to = round_page(to);
5028	from = round_page(from);
5029	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
5030	MPASS((bp->b_flags & B_MAXPHYS) == 0);
5031	KASSERT(to - from <= maxbcachebuf,
5032	    ("vm_hold_load_pages too large %p %#jx %#jx %u",
5033	    bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
5034
5035	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
5036		/*
5037		 * note: must allocate system pages since blocking here
5038		 * could interfere with paging I/O, no matter which
5039		 * process we are.
5040		 */
5041		p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
5042		    VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
5043		pmap_qenter(pg, &p, 1);
5044		bp->b_pages[index] = p;
5045	}
5046	bp->b_npages = index;
5047}
5048
5049/* Return pages associated with this buf to the vm system */
5050static void
5051vm_hold_free_pages(struct buf *bp, int newbsize)
5052{
5053	vm_offset_t from;
5054	vm_page_t p;
5055	int index, newnpages;
5056
5057	BUF_CHECK_MAPPED(bp);
5058
5059	from = round_page((vm_offset_t)bp->b_data + newbsize);
5060	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
5061	if (bp->b_npages > newnpages)
5062		pmap_qremove(from, bp->b_npages - newnpages);
5063	for (index = newnpages; index < bp->b_npages; index++) {
5064		p = bp->b_pages[index];
5065		bp->b_pages[index] = NULL;
5066		vm_page_unwire_noq(p);
5067		vm_page_free(p);
5068	}
5069	bp->b_npages = newnpages;
5070}
5071
5072/*
5073 * Map an IO request into kernel virtual address space.
5074 *
5075 * All requests are (re)mapped into kernel VA space.
5076 * Notice that we use b_bufsize for the size of the buffer
5077 * to be mapped.  b_bcount might be modified by the driver.
5078 *
5079 * Note that even if the caller determines that the address space should
5080 * be valid, a race or a smaller file mapped into a larger space may
5081 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
5082 * check the return value.
5083 *
5084 * This function only works with pager buffers.
5085 */
5086int
5087vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
5088{
5089	vm_prot_t prot;
5090	int pidx;
5091
5092	MPASS((bp->b_flags & B_MAXPHYS) != 0);
5093	prot = VM_PROT_READ;
5094	if (bp->b_iocmd == BIO_READ)
5095		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
5096	pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
5097	    (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
5098	if (pidx < 0)
5099		return (-1);
5100	bp->b_bufsize = len;
5101	bp->b_npages = pidx;
5102	bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
5103	if (mapbuf || !unmapped_buf_allowed) {
5104		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
5105		bp->b_data = bp->b_kvabase + bp->b_offset;
5106	} else
5107		bp->b_data = unmapped_buf;
5108	return (0);
5109}
5110
5111/*
5112 * Free the io map PTEs associated with this IO operation.
5113 * We also invalidate the TLB entries and restore the original b_addr.
5114 *
5115 * This function only works with pager buffers.
5116 */
5117void
5118vunmapbuf(struct buf *bp)
5119{
5120	int npages;
5121
5122	npages = bp->b_npages;
5123	if (buf_mapped(bp))
5124		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
5125	vm_page_unhold_pages(bp->b_pages, npages);
5126
5127	bp->b_data = unmapped_buf;
5128}
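/*
 * Illustrative sketch of the physio-style pairing for the two routines
 * above.  It assumes a pager buffer taken from pbuf_zone (so B_MAXPHYS is
 * set); "udata", "ulen" and the transfer itself are hypothetical.
 *
 *	bp = uma_zalloc(pbuf_zone, M_WAITOK);
 *	if (vmapbuf(bp, udata, ulen, 1) < 0) {
 *		uma_zfree(pbuf_zone, bp);
 *		return (EFAULT);
 *	}
 *	(perform the transfer against bp->b_data / bp->b_pages)
 *	vunmapbuf(bp);
 *	uma_zfree(pbuf_zone, bp);
 */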
5129
5130void
5131bdone(struct buf *bp)
5132{
5133	struct mtx *mtxp;
5134
5135	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5136	mtx_lock(mtxp);
5137	bp->b_flags |= B_DONE;
5138	wakeup(bp);
5139	mtx_unlock(mtxp);
5140}
5141
5142void
5143bwait(struct buf *bp, u_char pri, const char *wchan)
5144{
5145	struct mtx *mtxp;
5146
5147	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5148	mtx_lock(mtxp);
5149	while ((bp->b_flags & B_DONE) == 0)
5150		msleep(bp, mtxp, pri, wchan, 0);
5151	mtx_unlock(mtxp);
5152}
5153
5154int
5155bufsync(struct bufobj *bo, int waitfor)
5156{
5157
5158	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
5159}
5160
5161void
5162bufstrategy(struct bufobj *bo, struct buf *bp)
5163{
5164	int i __unused;
5165	struct vnode *vp;
5166
5167	vp = bp->b_vp;
5168	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
5169	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
5170	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
5171	i = VOP_STRATEGY(vp, bp);
5172	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
5173}
5174
5175/*
5176 * Initialize a struct bufobj before use.  Memory is assumed zero filled.
5177 */
5178void
5179bufobj_init(struct bufobj *bo, void *private)
5180{
5181	static volatile int bufobj_cleanq;
5182
5183	bo->bo_domain =
5184	    atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
5185	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
5186	bo->bo_private = private;
5187	TAILQ_INIT(&bo->bo_clean.bv_hd);
5188	pctrie_init(&bo->bo_clean.bv_root);
5189	TAILQ_INIT(&bo->bo_dirty.bv_hd);
5190	pctrie_init(&bo->bo_dirty.bv_root);
5191}
5192
5193void
5194bufobj_wrefl(struct bufobj *bo)
5195{
5196
5197	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5198	ASSERT_BO_WLOCKED(bo);
5199	bo->bo_numoutput++;
5200}
5201
5202void
5203bufobj_wref(struct bufobj *bo)
5204{
5205
5206	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5207	BO_LOCK(bo);
5208	bo->bo_numoutput++;
5209	BO_UNLOCK(bo);
5210}
5211
5212void
5213bufobj_wdrop(struct bufobj *bo)
5214{
5215
5216	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
5217	BO_LOCK(bo);
5218	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
5219	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
5220		bo->bo_flag &= ~BO_WWAIT;
5221		wakeup(&bo->bo_numoutput);
5222	}
5223	BO_UNLOCK(bo);
5224}
5225
5226int
5227bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
5228{
5229	int error;
5230
5231	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
5232	ASSERT_BO_WLOCKED(bo);
5233	error = 0;
5234	while (bo->bo_numoutput) {
5235		bo->bo_flag |= BO_WWAIT;
5236		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
5237		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
5238		if (error)
5239			break;
5240	}
5241	return (error);
5242}
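/*
 * Illustrative sketch: the bo_numoutput protocol above is how callers drain
 * writes in flight on a bufobj.  Writers take a reference before the I/O is
 * started (see bufobj_wref()) and bufdone() drops it for BIO_WRITE buffers,
 * so a flusher simply waits:
 *
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */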
5243
5244/*
5245 * Set bio_data or bio_ma for struct bio from the struct buf.
5246 */
5247void
5248bdata2bio(struct buf *bp, struct bio *bip)
5249{
5250
5251	if (!buf_mapped(bp)) {
5252		KASSERT(unmapped_buf_allowed, ("unmapped"));
5253		bip->bio_ma = bp->b_pages;
5254		bip->bio_ma_n = bp->b_npages;
5255		bip->bio_data = unmapped_buf;
5256		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
5257		bip->bio_flags |= BIO_UNMAPPED;
5258		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
5259		    PAGE_SIZE == bp->b_npages,
5260		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
5261		    (long long)bip->bio_length, bip->bio_ma_n));
5262	} else {
5263		bip->bio_data = bp->b_data;
5264		bip->bio_ma = NULL;
5265	}
5266}
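/*
 * Illustrative sketch of how a lower-layer strategy routine might build a
 * struct bio from a struct buf using the helper above; the completion
 * callback "my_bio_done" and the delivery step are hypothetical.
 *
 *	struct bio *bip;
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = bp->b_iocmd;
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bip->bio_done = my_bio_done;
 *	bdata2bio(bp, bip);
 *	(hand bip to the underlying provider)
 */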
5267
5268struct memdesc
5269memdesc_bio(struct bio *bio)
5270{
5271	if ((bio->bio_flags & BIO_VLIST) != 0)
5272		return (memdesc_vlist((struct bus_dma_segment *)bio->bio_data,
5273		    bio->bio_ma_n));
5274
5275	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
5276		return (memdesc_vmpages(bio->bio_ma, bio->bio_bcount,
5277		    bio->bio_ma_offset));
5278
5279	return (memdesc_vaddr(bio->bio_data, bio->bio_bcount));
5280}
5281
5282static int buf_pager_relbuf;
5283SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
5284    &buf_pager_relbuf, 0,
5285    "Make buffer pager release buffers after reading");
5286
5287/*
5288 * The buffer pager.  It uses buffer reads to validate pages.
5289 *
5290 * In contrast to the generic local pager from vm/vnode_pager.c, this
5291 * pager correctly and easily handles volumes where the underlying
5292 * device block size is greater than the machine page size.  The
5293 * buffer cache transparently extends the requested page run to be
5294 * aligned at the block boundary, and does the necessary bogus page
5295 * replacements in the addends to avoid obliterating already valid
5296 * pages.
5297 *
5298 * The only non-trivial issue is that the exclusive busy state for
5299 * pages, which is assumed by the vm_pager_getpages() interface, is
5300 * incompatible with the VMIO buffer cache's desire to share-busy the
5301 * pages.  This function performs a trivial downgrade of the pages'
5302 * state before reading buffers, and a less trivial upgrade from the
5303 * shared-busy to excl-busy state after the read.
5304 */
5305int
5306vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
5307    int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
5308    vbg_get_blksize_t get_blksize)
5309{
5310	vm_page_t m;
5311	vm_object_t object;
5312	struct buf *bp;
5313	struct mount *mp;
5314	daddr_t lbn, lbnp;
5315	vm_ooffset_t la, lb, poff, poffe;
5316	long bo_bs, bsize;
5317	int br_flags, error, i, pgsin, pgsin_a, pgsin_b;
5318	bool redo, lpart;
5319
5320	object = vp->v_object;
5321	mp = vp->v_mount;
5322	error = 0;
5323	la = IDX_TO_OFF(ma[count - 1]->pindex);
5324	if (la >= object->un_pager.vnp.vnp_size)
5325		return (VM_PAGER_BAD);
5326
5327	/*
5328	 * Change the meaning of la from where the last requested page starts
5329	 * to where it ends, because that's the end of the requested region
5330	 * and the start of the potential read-ahead region.
5331	 */
5332	la += PAGE_SIZE;
5333	lpart = la > object->un_pager.vnp.vnp_size;
5334	error = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)),
5335	    &bo_bs);
5336	if (error != 0)
5337		return (VM_PAGER_ERROR);
5338
5339	/*
5340	 * Calculate read-ahead, behind and total pages.
5341	 */
5342	pgsin = count;
5343	lb = IDX_TO_OFF(ma[0]->pindex);
5344	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
5345	pgsin += pgsin_b;
5346	if (rbehind != NULL)
5347		*rbehind = pgsin_b;
5348	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5349	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5350		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5351		    PAGE_SIZE) - la);
5352	pgsin += pgsin_a;
5353	if (rahead != NULL)
5354		*rahead = pgsin_a;
5355	VM_CNT_INC(v_vnodein);
5356	VM_CNT_ADD(v_vnodepgsin, pgsin);
5357
5358	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5359	    != 0) ? GB_UNMAPPED : 0;
5360again:
5361	for (i = 0; i < count; i++) {
5362		if (ma[i] != bogus_page)
5363			vm_page_busy_downgrade(ma[i]);
5364	}
5365
5366	lbnp = -1;
5367	for (i = 0; i < count; i++) {
5368		m = ma[i];
5369		if (m == bogus_page)
5370			continue;
5371
5372		/*
5373		 * Pages are shared busy and the object lock is not
5374		 * owned, which together allow for the pages'
5375		 * invalidation.  The racy test for validity avoids
5376		 * useless creation of the buffer for the most typical
5377		 * case when invalidation is not used in redo or for
5378		 * parallel read.  The shared->excl upgrade loop at
5379		 * the end of the function catches the race in a
5380		 * reliable way (protected by the object lock).
5381		 */
5382		if (vm_page_all_valid(m))
5383			continue;
5384
5385		poff = IDX_TO_OFF(m->pindex);
5386		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5387		for (; poff < poffe; poff += bsize) {
5388			lbn = get_lblkno(vp, poff);
5389			if (lbn == lbnp)
5390				goto next_page;
5391			lbnp = lbn;
5392
5393			error = get_blksize(vp, lbn, &bsize);
5394			if (error == 0)
5395				error = bread_gb(vp, lbn, bsize,
5396				    curthread->td_ucred, br_flags, &bp);
5397			if (error != 0)
5398				goto end_pages;
5399			if (bp->b_rcred == curthread->td_ucred) {
5400				crfree(bp->b_rcred);
5401				bp->b_rcred = NOCRED;
5402			}
5403			if (LIST_EMPTY(&bp->b_dep)) {
5404				/*
5405				 * Invalidation clears m->valid, but
5406				 * may leave B_CACHE flag if the
5407				 * buffer existed at the invalidation
5408				 * time.  In this case, recycle the
5409				 * buffer to do real read on next
5410				 * bread() after redo.
5411				 *
5412				 * Otherwise B_RELBUF is not strictly
5413				 * necessary; enable it to reduce buffer
5414				 * cache pressure.
5415				 */
5416				if (buf_pager_relbuf ||
5417				    !vm_page_all_valid(m))
5418					bp->b_flags |= B_RELBUF;
5419
5420				bp->b_flags &= ~B_NOCACHE;
5421				brelse(bp);
5422			} else {
5423				bqrelse(bp);
5424			}
5425		}
5426		KASSERT(1 /* racy, enable for debugging */ ||
5427		    vm_page_all_valid(m) || i == count - 1,
5428		    ("buf %d %p invalid", i, m));
5429		if (i == count - 1 && lpart) {
5430			if (!vm_page_none_valid(m) &&
5431			    !vm_page_all_valid(m))
5432				vm_page_zero_invalid(m, TRUE);
5433		}
5434next_page:;
5435	}
5436end_pages:
5437
5438	redo = false;
5439	for (i = 0; i < count; i++) {
5440		if (ma[i] == bogus_page)
5441			continue;
5442		if (vm_page_busy_tryupgrade(ma[i]) == 0) {
5443			vm_page_sunbusy(ma[i]);
5444			ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
5445			    VM_ALLOC_NORMAL);
5446		}
5447
5448		/*
5449		 * Since the pages were only sbusy while neither the
5450		 * buffer nor the object lock was held by us, or
5451		 * reallocated while vm_page_grab() slept for busy
5452		 * relinquish, they could have been invalidated.
5453		 * Recheck the valid bits and re-read as needed.
5454		 *
5455		 * Note that the last page is made fully valid in the
5456		 * read loop, and partial validity for the page at
5457		 * index count - 1 could mean that the page was
5458		 * invalidated or removed, so we must restart for
5459		 * safety as well.
5460		 */
5461		if (!vm_page_all_valid(ma[i]))
5462			redo = true;
5463	}
5464	if (redo && error == 0)
5465		goto again;
5466	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5467}
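/*
 * Illustrative sketch of how a filesystem plugs into vfs_bio_getpages()
 * from its VOP_GETPAGES method.  "examplefs" and EXAMPLEFS_BSIZE are
 * hypothetical; a real filesystem computes the logical block number and
 * block size from its own metadata.
 *
 *	static daddr_t
 *	examplefs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 *	{
 *		return (off / EXAMPLEFS_BSIZE);
 *	}
 *
 *	static int
 *	examplefs_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
 *	{
 *		*sz = EXAMPLEFS_BSIZE;
 *		return (0);
 *	}
 *
 *	static int
 *	examplefs_getpages(struct vop_getpages_args *ap)
 *	{
 *		return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
 *		    ap->a_rbehind, ap->a_rahead, examplefs_gbp_getblkno,
 *		    examplefs_gbp_getblksz));
 *	}
 */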
5468
5469#include "opt_ddb.h"
5470#ifdef DDB
5471#include <ddb/ddb.h>
5472
5473/* DDB command to show buffer data */
5474DB_SHOW_COMMAND(buffer, db_show_buffer)
5475{
5476	/* get args */
5477	struct buf *bp = (struct buf *)addr;
5478#ifdef FULL_BUF_TRACKING
5479	uint32_t i, j;
5480#endif
5481
5482	if (!have_addr) {
5483		db_printf("usage: show buffer <addr>\n");
5484		return;
5485	}
5486
5487	db_printf("buf at %p\n", bp);
5488	db_printf("b_flags = 0x%b, b_xflags = 0x%b\n",
5489	    (u_int)bp->b_flags, PRINT_BUF_FLAGS,
5490	    (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
5491	db_printf("b_vflags = 0x%b, b_ioflags = 0x%b\n",
5492	    (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
5493	    (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
5494	db_printf(
5495	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5496	    "b_bufobj = %p, b_data = %p\n"
5497	    "b_blkno = %jd, b_lblkno = %jd, b_vp = %p, b_dep = %p\n",
5498	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5499	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5500	    (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
5501	db_printf("b_kvabase = %p, b_kvasize = %d\n",
5502	    bp->b_kvabase, bp->b_kvasize);
5503	if (bp->b_npages) {
5504		int i;
5505		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5506		for (i = 0; i < bp->b_npages; i++) {
5507			vm_page_t m;
5508			m = bp->b_pages[i];
5509			if (m != NULL)
5510				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5511				    (u_long)m->pindex,
5512				    (u_long)VM_PAGE_TO_PHYS(m));
5513			else
5514				db_printf("( ??? )");
5515			if ((i + 1) < bp->b_npages)
5516				db_printf(",");
5517		}
5518		db_printf("\n");
5519	}
5520	BUF_LOCKPRINTINFO(bp);
5521#if defined(FULL_BUF_TRACKING)
5522	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5523
5524	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5525	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5526		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5527			continue;
5528		db_printf(" %2u: %s\n", j,
5529		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5530	}
5531#elif defined(BUF_TRACKING)
5532	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5533#endif
5534}
5535
5536DB_SHOW_COMMAND_FLAGS(bufqueues, bufqueues, DB_CMD_MEMSAFE)
5537{
5538	struct bufdomain *bd;
5539	struct buf *bp;
5540	long total;
5541	int i, j, cnt;
5542
5543	db_printf("bqempty: %d\n", bqempty.bq_len);
5544
5545	for (i = 0; i < buf_domains; i++) {
5546		bd = &bdomain[i];
5547		db_printf("Buf domain %d\n", i);
5548		db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5549		db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5550		db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5551		db_printf("\n");
5552		db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5553		db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5554		db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5555		db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5556		db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5557		db_printf("\n");
5558		db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
5559		db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
5560		db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
5561		db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
5562		db_printf("\n");
5563		total = 0;
5564		TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
5565			total += bp->b_bufsize;
5566		db_printf("\tcleanq count\t%d (%ld)\n",
5567		    bd->bd_cleanq->bq_len, total);
5568		total = 0;
5569		TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
5570			total += bp->b_bufsize;
5571		db_printf("\tdirtyq count\t%d (%ld)\n",
5572		    bd->bd_dirtyq.bq_len, total);
5573		db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5574		db_printf("\tlim\t\t%d\n", bd->bd_lim);
5575		db_printf("\tCPU ");
5576		for (j = 0; j <= mp_maxid; j++)
5577			db_printf("%d, ", bd->bd_subq[j].bq_len);
5578		db_printf("\n");
5579		cnt = 0;
5580		total = 0;
5581		for (j = 0; j < nbuf; j++) {
5582			bp = nbufp(j);
5583			if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
5584				cnt++;
5585				total += bp->b_bufsize;
5586			}
5587		}
5588		db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
5589		cnt = 0;
5590		total = 0;
5591		for (j = 0; j < nbuf; j++) {
5592			bp = nbufp(j);
5593			if (bp->b_domain == i) {
5594				cnt++;
5595				total += bp->b_bufsize;
5596			}
5597		}
5598		db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
5599	}
5600}
5601
5602DB_SHOW_COMMAND_FLAGS(lockedbufs, lockedbufs, DB_CMD_MEMSAFE)
5603{
5604	struct buf *bp;
5605	int i;
5606
5607	for (i = 0; i < nbuf; i++) {
5608		bp = nbufp(i);
5609		if (BUF_ISLOCKED(bp)) {
5610			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5611			db_printf("\n");
5612			if (db_pager_quit)
5613				break;
5614		}
5615	}
5616}
5617
5618DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5619{
5620	struct vnode *vp;
5621	struct buf *bp;
5622
5623	if (!have_addr) {
5624		db_printf("usage: show vnodebufs <addr>\n");
5625		return;
5626	}
5627	vp = (struct vnode *)addr;
5628	db_printf("Clean buffers:\n");
5629	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5630		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5631		db_printf("\n");
5632	}
5633	db_printf("Dirty buffers:\n");
5634	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5635		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5636		db_printf("\n");
5637	}
5638}
5639
5640DB_COMMAND_FLAGS(countfreebufs, db_coundfreebufs, DB_CMD_MEMSAFE)
5641{
5642	struct buf *bp;
5643	int i, used = 0, nfree = 0;
5644
5645	if (have_addr) {
5646		db_printf("usage: countfreebufs\n");
5647		return;
5648	}
5649
5650	for (i = 0; i < nbuf; i++) {
5651		bp = nbufp(i);
5652		if (bp->b_qindex == QUEUE_EMPTY)
5653			nfree++;
5654		else
5655			used++;
5656	}
5657
5658	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5659	    nfree + used);
5660	db_printf("numfreebuffers is %d\n", numfreebuffers);
5661}
5662#endif /* DDB */
5663