/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of locks.  If a
 *	field is annotated with two of these locks then holding either is
 *	sufficient for read access but both are required for write access.
 *	The queue lock for a page depends on the value of its queue field and is
 *	described in detail below.
 *
 *	The following annotations are possible:
 *	(A) the field must be accessed using atomic(9) and may require
 *	    additional synchronization.
 *	(B) the page busy lock.
 *	(C) the field is immutable.
 *	(F) the per-domain lock for the free queues.
 *	(M) Machine dependent, defined by pmap layer.
 *	(O) the object that the page belongs to.
 *	(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available in
 *	the kernel's general-purpose synchronization primitives.  As a result,
 *	busy lock ordering rules are not verified, lock recursion is not
 *	detected, and an attempt to xbusy a busy page or sbusy an xbusy page
 *	will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
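 *
 *	For example, a blocking exclusive-busy cycle might look like this
 *	(an illustrative sketch only; "m" must already be a stable page
 *	reference, and allocflags of 0 requests the default exclusive mode):
 *
 *		if (vm_page_busy_acquire(m, 0)) {
 *			... inspect or modify the page ...
 *			vm_page_xunbusy(m);
 *		}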
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When a page is invalidated, it must not be
 *	present in pmap, and the object lock must be held to prevent
 *	concurrent speculative read-only mappings that do not require busy.
 *	I/O routines may check for validity without a lock if they are
 *	prepared to handle invalidation races with higher level locks (vnode)
 *	or are unconcerned with races so long as they hold a reference to
 *	prevent recycling.  When a valid bit is set while holding a shared
 *	busy lock, (A) atomic operations are used to protect against
 *	concurrent modification.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().  An
 *	exclusive busy lock combined with pmap_remove_{write/all}() is the
 *	only way to ensure a page cannot become dirty.  I/O generally
 *	removes the page from pmap to ensure exclusive access and atomic
 *	writes.
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the page
 *	allocator.  A page may be present in the page queues, or even actively
 *	scanned by the page daemon, without an explicitly counted reference.
 *	The page daemon must therefore handle the possibility of a concurrent
 *	free of the page.
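 *
 *	For example, a caller that holds the object lock and needs the page
 *	to stay resident across a window of work might use the following
 *	pattern (an illustrative sketch only):
 *
 *		vm_page_wire(m);
 *		... the page cannot be reclaimed here ...
 *		vm_page_unwire(m, PQ_INACTIVE);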
 *
 *	The queue state of a page consists of the queue and act_count fields of
 *	its atomically updated state, and the subset of atomic flags specified
 *	by PGA_QUEUE_STATE_MASK.  The queue field contains the page's page queue
 *	index, or PQ_NONE if it does not belong to a page queue.  To modify the
 *	queue field, the page queue lock corresponding to the old value must be
 *	held, unless that value is PQ_NONE, in which case the queue index must
 *	be updated using an atomic RMW operation.  There is one exception to
 *	this rule: the page daemon may transition the queue field from
 *	PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
 *	inactive queue scan.  At that point the page is already dequeued and no
 *	other references to that vm_page structure can exist.  The PGA_ENQUEUED
 *	flag, when set, indicates that the page structure is physically inserted
 *	into the queue corresponding to the page's queue index, and may only be
 *	set or cleared with the corresponding page queue lock held.
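 *
 *	For example, the queue index should normally be read through
 *	vm_page_queue() (defined below), which folds a pending PGA_DEQUEUE
 *	request into PQ_NONE (an illustrative sketch):
 *
 *		if (vm_page_queue(m) == PQ_ACTIVE)
 *			... the page is on, or headed to, the active queue ...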
 *
 *	To avoid contention on page queue locks, page queue operations (enqueue,
 *	dequeue, requeue) are batched using fixed-size per-CPU queues.  A
 *	deferred operation is requested by setting one of the flags in
 *	PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When a
 *	queue is full, an attempt to insert a new entry will lock the page
 *	queues and trigger processing of the pending entries.  The
 *	type-stability of vm_page structures is crucial to this scheme since the
 *	processing of entries in a given batch queue may be deferred
 *	indefinitely.  In particular, a page may be freed with pending batch
 *	queue entries.  The page queue operation flags must be set using atomic
 *	RMW operations.
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

typedef union vm_page_astate {
	struct {
		uint16_t flags;
		uint8_t	queue;
		uint8_t act_count;
	};
	uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	u_int busy_lock;		/* busy owners lock (A) */
	union vm_page_astate a;		/* state accessed atomically (A) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation.  Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page busy lock and
 * object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))
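
/*
 * For example, a racy "does any wiring exist?" check masks off the special
 * bits before testing the count; this is essentially how vm_page_wired(),
 * defined later in this file, works (sketch):
 *
 *	if (VPRC_WIRE_COUNT(m->ref_count) > 0)
 *		... at least one wiring exists ...
 */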

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * 	 indicates that the page is not under PV management but
 * 	 otherwise should be treated as a normal page.  Pages not
 * 	 under PV management cannot be paged out via the
 * 	 object/vm_page_t because there is no knowledge of their pte
 * 	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is based mostly on the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is removed because of
 * size constraints.  Checks on lock recursion are therefore not possible, and
 * the effectiveness of the lock assertions is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)

/* Freed lock blocks both shared and exclusive. */
#define	VPB_FREED		(0xffffffff - VPB_BIT_SHARED)
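
/*
 * As a worked example of the encoding above: an unbusied page has
 * busy_lock == VPB_SHARERS_WORD(0) (just VPB_BIT_SHARED), a page with two
 * shared holders has VPB_SHARERS_WORD(2), and VPB_SHARERS(VPB_SHARERS_WORD(2))
 * recovers the holder count of 2.  The word is normally manipulated only
 * through the vm_page_*busy*() functions declared below.
 */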

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa) 			\
	do {		   			\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the page's "queue" field
 * must be a valid queue index, and the corresponding page queue lock must be
 * held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
 * atomic RMW operation to ensure that the "queue" field is a valid queue index,
 * and the corresponding page queue lock must be held when clearing any of the
 * flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
#define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
#define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */

#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)

/*
 * Page flags.  Updates to these flags are not synchronized, and thus they must
 * be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/kassert.h>
#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore the others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
#define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
#define	VM_ALLOC_AVAIL0		0x0100
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_AVAIL1		0x0800
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
#define	VM_ALLOC_COUNT_MAX	0xffff
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT_MASK	(VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define	VM_ALLOC_COUNT(count)	({				\
	KASSERT((count) <= VM_ALLOC_COUNT_MAX,			\
	    ("%s: invalid VM_ALLOC_COUNT value", __func__));	\
	(count) << VM_ALLOC_COUNT_SHIFT;			\
})
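
/*
 * For example (an illustrative sketch; assumes a locked object "obj" and a
 * suitable "pindex"), a caller wanting a wired page, preferably pre-zeroed,
 * that expects to allocate roughly eight more pages soon might request:
 *
 *	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO | VM_ALLOC_COUNT(8));
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */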

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	return (pflags);
}
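
/*
 * For example (a sketch only), a consumer that already thinks in malloc(9)
 * terms can translate its flags and allocate an unnamed (object-less) page
 * directly:
 *
 *	pflags = malloc2vm_flags(M_NOWAIT | M_ZERO) | VM_ALLOC_WIRED;
 *	m = vm_page_alloc_noobj(pflags);
 *	if (m == NULL)
 *		return (ENOMEM);
 */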
#endif

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
vm_page_t vm_page_alloc_noobj(int);
vm_page_t vm_page_alloc_noobj_domain(int, int);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    int desired_runs);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_remove_xbusy(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\
} while (0)

#define	vm_page_busied(m)						\
	(vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define	vm_page_xbusied(m)						\
	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_busy_freed(m)						\
	(vm_page_busy_fetch(m) == VPB_FREED)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked.  In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define	vm_page_xbusy_claim(m) do {					\
	u_int _busy_lock;						\
									\
	vm_page_assert_xbusied_unchecked((m));				\
	do {								\
		_busy_lock = vm_page_busy_fetch(m);			\
	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#define	vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	0
#endif

/*
 *	Load a snapshot of a page's 32-bit atomic state.
 */
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
	vm_page_astate_t a;

	a._bits = atomic_load_32(&m->a._bits);
	return (a);
}

/*
 *	Atomically compare and set a page's atomic state.
 */
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
{

	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
	    ("%s: invalid head requeue request for page %p", __func__, m));
	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
	KASSERT(new._bits != old->_bits,
	    ("%s: bits are unchanged", __func__));

	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}
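
/*
 * A typical lockless update follows a load/modify/fcmpset loop; this is an
 * illustrative sketch of the pattern (note that vm_page_astate_fcmpset()
 * asserts that the new value differs from the old one, so unchanged states
 * must not be stored):
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if ((old.flags & PGA_REFERENCED) != 0)
 *			break;
 *		new = old;
 *		new.flags |= PGA_REFERENCED;
 *	} while (!vm_page_astate_fcmpset(m, &old, new));
 */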

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_BUSY_ASSERT(m);
	m->dirty = 0;
}

static inline uint8_t
_vm_page_queue(vm_page_astate_t as)
{

	if ((as.flags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	return (as.queue);
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	return (_vm_page_queue(vm_page_astate_load(m)));
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_drop:
 *
 *	Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}
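
/*
 * For example (an illustrative sketch mirroring the unwiring logic), releasing
 * one wiring and detecting whether it was the last one looks like:
 *
 *	u_int old;
 *
 *	old = vm_page_drop(m, 1);
 *	if (VPRC_WIRE_COUNT(old) == 1)
 *		... the final wiring was just released ...
 */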

/*
 *	vm_page_wired:
 *
 *	Perform a racy check to determine whether a reference prevents the page
 *	from being reclaimable.  If the page's object is locked, and the page is
 *	unmapped and exclusively busied by the current thread, no new wirings
 *	may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_any_valid(vm_page_t m)
{

	return (m->valid != 0);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

	return (m->valid == 0);
}

static inline int
vm_page_domain(vm_page_t m __numa_used)
{
#ifdef NUMA
	int domn, segind;

	segind = m->segind;
	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
	domn = vm_phys_segs[segind].domain;
	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
	return (domn);
#else
	return (0);
#endif
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */