/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Avadis Tevanian, Jr.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
60
/*
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
65
66#ifndef	_PMAP_VM_
67#define	_PMAP_VM_
68/*
69 * Each machine dependent implementation is expected to
70 * keep certain statistics.  They may do this anyway they
71 * so choose, but are expected to return the statistics
72 * in the following structure.
73 */
74struct pmap_statistics {
75	long resident_count;	/* # of pages mapped (total) */
76	long wired_count;	/* # of pages wired */
77};
78typedef struct pmap_statistics *pmap_statistics_t;
79
80/*
81 * Each machine-dependent implementation is required to provide:
82 *
83 * vm_memattr_t	pmap_page_get_memattr(vm_page_t);
84 * bool		pmap_page_is_mapped(vm_page_t);
85 * bool		pmap_page_is_write_mapped(vm_page_t);
86 * void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
87 */
88#include <machine/pmap.h>
89
90#ifdef _KERNEL
91#include <sys/_cpuset.h>
92struct thread;
93
94/*
95 * Updates to kernel_vm_end are synchronized by the kernel_map's system mutex.
96 */
97extern vm_offset_t kernel_vm_end;
98
/*
 * Flags for pmap_enter().  The bits in the low-order byte are reserved
 * for the protection code (vm_prot_t) that describes the fault type.
 * Bits 24 through 31 are reserved for the pmap's internal use.
 */
#define	PMAP_ENTER_NOSLEEP	0x00000100
#define	PMAP_ENTER_WIRED	0x00000200
#define	PMAP_ENTER_LARGEPAGE	0x00000400
#define	PMAP_ENTER_RESERVED	0xFF000000
108
/*
 * Define the maximum number of machine-dependent reference bits that are
 * cleared by a call to pmap_ts_referenced().  This limit serves two purposes.
 * First, it bounds the cost of reference bit maintenance on widely shared
 * pages.  Second, it prevents numeric overflow during maintenance of a
 * widely shared page's "act_count" field.  An overflow could result in the
 * premature deactivation of the page.
 */
#define	PMAP_TS_REFERENCED_MAX	5
118
119void		 pmap_activate(struct thread *td);
120void		 pmap_active_cpus(pmap_t pmap, cpuset_t *res);
121void		 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
122		    int advice);
123void		 pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
124		    vm_size_t);
125void		 pmap_clear_modify(vm_page_t m);
126void		 pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
127void		 pmap_copy_page(vm_page_t, vm_page_t);
128void		 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
129		    vm_page_t mb[], vm_offset_t b_offset, int xfersize);
130int		 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
131		    vm_prot_t prot, u_int flags, int8_t psind);
132void		 pmap_enter_object(pmap_t pmap, vm_offset_t start,
133		    vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
134void		 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
135		    vm_prot_t prot);
136vm_paddr_t	 pmap_extract(pmap_t pmap, vm_offset_t va);
137vm_page_t	 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
138		    vm_prot_t prot);
139void		 pmap_growkernel(vm_offset_t);
140void		 pmap_init(void);
141bool		 pmap_is_modified(vm_page_t m);
142bool		 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
143bool		 pmap_is_referenced(vm_page_t m);
144bool		 pmap_is_valid_memattr(pmap_t, vm_memattr_t);
145vm_offset_t	 pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
146int		 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap);
147void		 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
148		    vm_object_t object, vm_pindex_t pindex, vm_size_t size);
149bool		 pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
150void		 pmap_page_init(vm_page_t m);
151int		 pmap_page_wired_mappings(vm_page_t m);
152int		 pmap_pinit(pmap_t);
153void		 pmap_pinit0(pmap_t);
154void		 pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
155void		 pmap_qenter(vm_offset_t, vm_page_t *, int);
156void		 pmap_qremove(vm_offset_t, int);
157vm_offset_t	 pmap_quick_enter_page(vm_page_t);
158void		 pmap_quick_remove_page(vm_offset_t);
159void		 pmap_release(pmap_t);
160void		 pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
161void		 pmap_remove_all(vm_page_t m);
162void		 pmap_remove_pages(pmap_t);
163void		 pmap_remove_write(vm_page_t m);
164void		 pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
165int		 pmap_ts_referenced(vm_page_t m);
166void		 pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);
167void		 pmap_zero_page(vm_page_t);
168void		 pmap_zero_page_area(vm_page_t, int off, int size);
169
/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
	INVL_OP_TLB               = 1,
	INVL_OP_TLB_INVPCID       = 2,
	INVL_OP_TLB_INVPCID_PTI   = 3,
	INVL_OP_TLB_PCID          = 4,
	INVL_OP_PGRNG             = 5,
	INVL_OP_PGRNG_INVPCID     = 6,
	INVL_OP_PGRNG_PCID        = 7,
	INVL_OP_PG                = 8,
	INVL_OP_PG_INVPCID        = 9,
	INVL_OP_PG_PCID           = 10,
	INVL_OP_CACHE             = 11,
};
187typedef void (*smp_invl_local_cb_t)(struct pmap *, vm_offset_t addr1,
188    vm_offset_t addr2);
189typedef void (*smp_targeted_tlb_shootdown_t)(pmap_t, vm_offset_t, vm_offset_t,
190    smp_invl_local_cb_t, enum invl_op_codes);
191
192extern void
193smp_targeted_tlb_shootdown_native(pmap_t, vm_offset_t, vm_offset_t,
194    smp_invl_local_cb_t, enum invl_op_codes);
195extern smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown;
196
/*
 * Accessors for the statistics embedded in the machine-dependent pmap
 * as its "pm_stats" member (presumably a struct pmap_statistics).
 */
#define	pmap_resident_count(pm)	((pm)->pm_stats.resident_count)
#define	pmap_wired_count(pm)	((pm)->pm_stats.wired_count)
199
200#endif /* _KERNEL */
201#endif /* _PMAP_VM_ */
202