/*	$OpenBSD: uvm_extern.h,v 1.174 2024/04/02 08:39:17 deraadt Exp $	*/
/*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

typedef int vm_fault_t;

typedef int vm_inherit_t;	/* XXX: inheritance codes */
typedef off_t voff_t;		/* XXX: offset within a uvm_object */

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page  *vm_page_t;
/*
 * Bit assignments made by UVM_MAPFLAG() and extracted by
 * UVM_{PROTECTION,INHERIT,MAXPROTECTION,ADVICE}():
 * bits 0-2	protection
 *  bit 3	 unused
 * bits 4-5	inheritance
 *  bits 6-7	 unused
 * bits 8-10	max protection
 *  bit 11	 unused
 * bits 12-14	advice
 *  bit 15	 unused
 * bits 16-N	flags
 */

/* protection bits */
#define PROT_MASK	(PROT_READ | PROT_WRITE | PROT_EXEC)

/* inherit codes */
#define MAP_INHERIT_MASK	0x3	/* inherit mask */

typedef int		vm_prot_t;

#define MADV_MASK	0x7	/* mask */

/* mapping flags */
#define UVM_FLAG_FIXED   0x0010000 /* address is fixed, don't search for space */
#define UVM_FLAG_OVERLAY 0x0020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x0040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x0080000 /* set copy_on_write flag */
#define UVM_FLAG_TRYLOCK 0x0100000 /* fail if we cannot lock map */
#define UVM_FLAG_HOLE    0x0200000 /* no backend */
#define UVM_FLAG_QUERY   0x0400000 /* do everything, except actual execution */
#define UVM_FLAG_NOFAULT 0x0800000 /* don't fault */
#define UVM_FLAG_UNMAP   0x1000000 /* unmap to make space */
#define UVM_FLAG_STACK   0x2000000 /* page may contain a stack */
#define UVM_FLAG_WC      0x4000000 /* write combining */
#define UVM_FLAG_CONCEAL 0x8000000 /* omit from dumps */
#define UVM_FLAG_SIGALTSTACK 0x20000000 /* sigaltstack validation required */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & PROT_MASK)
#define UVM_INHERIT(X)		(((X) >> 4) & MAP_INHERIT_MASK)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & MADV_MASK)

#define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \
	((prot) | ((maxprot) << 8) | ((inh) << 4) | ((advice) << 12) | (flags))
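
/*
 * Illustrative example only (not part of the API): packing and unpacking
 * with the macros above.  A read/write mapping whose protection may later
 * be raised to read/write/exec, with copy-on-write inheritance and normal
 * paging advice, could be encoded as
 *
 *	map_flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
 *	    PROT_READ | PROT_WRITE | PROT_EXEC,
 *	    MAP_INHERIT_COPY, MADV_NORMAL, UVM_FLAG_COPYONW);
 *
 * UVM_PROTECTION(map_flags) then recovers PROT_READ | PROT_WRITE and
 * UVM_MAXPROTECTION(map_flags) recovers PROT_READ | PROT_WRITE | PROT_EXEC.
 * (map_flags is a placeholder name; MAP_INHERIT_COPY and MADV_NORMAL come
 * from <sys/mman.h>, included below.)
 */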

/* magic offset value */
#define UVM_UNKNOWN_OFFSET ((voff_t) -1)
				/* offset not known (obj) or don't care (!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1			/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2			/* allocate VA only */
#define UVM_KMF_CANFAIL	0x4			/* caller handles failure */
#define UVM_KMF_ZERO	0x08			/* zero pages */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */

/*
 * flags for uvm_pagealloc()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zeroed */

/*
 * flags for uvm_pglistalloc() also used by uvm_pmr_getpages()
 */
#define UVM_PLA_WAITOK		0x0001	/* may sleep */
#define UVM_PLA_NOWAIT		0x0002	/* can't sleep (need one of the two) */
#define UVM_PLA_ZERO		0x0004	/* zero all pages before returning */
#define UVM_PLA_TRYCONTIG	0x0008	/* try to allocate contig physmem */
#define UVM_PLA_FAILOK		0x0010	/* caller can handle failure */
#define UVM_PLA_NOWAKE		0x0020	/* don't wake page daemon on failure */
#define UVM_PLA_USERESERVE	0x0040	/* can allocate from kernel reserve */

/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * flags to uvm_page_physload.
 */
#define	PHYSLOAD_DEVICE	0x01	/* don't add to the page queue */

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/mman.h>

#ifdef _KERNEL
struct buf;
struct mount;
struct pglist;
struct vmspace;
struct pmap;
#endif

#include <uvm/uvm_param.h>

#include <uvm/uvm_pmap.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_map.h>

#ifdef _KERNEL
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>
#endif

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 *
 *  Locks used to protect struct members in this file:
 *	K	kernel lock
 *	I	immutable after creation
 *	v	vm_map's lock
 */
struct vmspace {
	struct	vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* [K] number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_dused;	/* data segment length (pages) XXX */
	segsz_t vm_ssize;	/* [v] stack size (pages) */
	caddr_t	vm_taddr;	/* [I] user virtual address of text */
	caddr_t	vm_daddr;	/* [I] user virtual address of data */
	caddr_t vm_maxsaddr;	/* [I] user VA at max stack growth */
	caddr_t vm_minsaddr;	/* [I] user VA at top of stack */
};
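
/*
 * Illustrative sketch only: the "copy from vm_startcopy" rule above is
 * typically implemented in the fork path (see uvmspace_fork()) roughly as
 *
 *	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
 *	    (caddr_t)(vm1 + 1) - (caddr_t)&vm1->vm_startcopy);
 *
 * i.e. everything from vm_rssize to the end of struct vmspace is copied
 * into the child's vmspace.
 */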

/*
 * uvm_constraint_range's:
 * MD code may set up constraint ranges for memory allocators; the primary
 * use for this is to keep allocations for certain memory consumers, such
 * as mbuf pools, within address ranges that are reachable by devices that
 * perform DMA.
 *
 * It also discourages memory allocations from being satisfied from ranges
 * such as the ISA memory range when they can be satisfied from other
 * ranges.
 *
 * The MD ranges are defined in arch/ARCH/ARCH/machdep.c
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;
	paddr_t ucr_high;
};
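
/*
 * Illustrative example only: an MD machdep.c might describe the memory
 * reachable by 32-bit DMA engines as
 *
 *	struct uvm_constraint_range dma_constraint = { 0x0, 0xffffffffUL };
 *
 * The exact ranges, and which constraints exist at all, are machine
 * dependent.
 */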

#ifdef _KERNEL

#include <uvm/uvmexp.h>
extern struct uvmexp uvmexp;

/* Constraint ranges, set by MD code. */
extern struct uvm_constraint_range  isa_constraint;
extern struct uvm_constraint_range  dma_constraint;
extern struct uvm_constraint_range  no_constraint;
extern struct uvm_constraint_range *uvm_md_constraints[];

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;

/* base of kernel virtual memory */
extern vaddr_t vm_min_kernel_address;

/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,0,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,0,FALSE)
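
/*
 * Illustrative only: allocating and freeing a page of wired, zeroed kernel
 * memory with the wrappers above (uvm_km_free() is declared below):
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_zalloc(kernel_map, PAGE_SIZE);
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE);
 */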

#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))

struct plimit;

void			vmapbuf(struct buf *, vsize_t);
void			vunmapbuf(struct buf *, vsize_t);
struct uvm_object	*uao_create(vsize_t, int);
void			uao_detach(struct uvm_object *);
void			uao_reference(struct uvm_object *);
int			uvm_fault(vm_map_t, vaddr_t, vm_fault_t, vm_prot_t);

vaddr_t			uvm_uarea_alloc(void);
void			uvm_uarea_free(struct proc *);
void			uvm_exit(struct process *);
void			uvm_init_limits(struct plimit *);
boolean_t		uvm_kernacc(caddr_t, size_t, int);

int			uvm_vslock(struct proc *, caddr_t, size_t,
			    vm_prot_t);
void			uvm_vsunlock(struct proc *, caddr_t, size_t);
int			uvm_vslock_device(struct proc *, void *, size_t,
			    vm_prot_t, void **);
void			uvm_vsunlock_device(struct proc *, void *, size_t,
			    void *);
void			uvm_pause(void);
void			uvm_init(void);
void			uvm_init_percpu(void);
int			uvm_io(vm_map_t, struct uio *, int);

#define	UVM_IO_FIXPROT	0x01

vaddr_t			uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void			uvm_km_free(vm_map_t, vaddr_t, vsize_t);
vaddr_t			uvm_km_kmemalloc_pla(struct vm_map *,
			    struct uvm_object *, vsize_t, vsize_t, int,
			    paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags)				\
	uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
			    vsize_t, int, boolean_t, vm_map_t);
/*
 * Allocation mode for virtual space.
 *
 *  kv_map - pointer to the pointer to the map we're allocating from.
 *  kv_align - alignment.
 *  kv_wait - wait for free space in the map if it's full. The default
 *   allocators don't wait since running out of space in kernel_map and
 *   kmem_map is usually fatal. Special maps like exec_map are specifically
 *   limited, so waiting for space in them is necessary.
 *  kv_singlepage - use the single page allocator.
 */
struct kmem_va_mode {
	struct vm_map **kv_map;
	vsize_t kv_align;
	char kv_wait;
	char kv_singlepage;
};

/*
 * Allocation mode for physical pages.
 *
 *  kp_constraint - allocation constraint for physical pages.
 *  kp_object - if the pages should be allocated from an object.
 *  kp_align - physical alignment of the first page in the allocation.
 *  kp_boundary - boundary that the physical addresses can't cross if
 *   the allocation is contiguous.
 *  kp_nomem - don't allocate any backing pages.
 *  kp_maxseg - maximum number of contiguous segments.
 *  kp_zero - zero the returned memory.
 *  kp_pageable - allocate pageable memory.
 */
struct kmem_pa_mode {
	struct uvm_constraint_range *kp_constraint;
	struct uvm_object **kp_object;
	paddr_t kp_align;
	paddr_t kp_boundary;
	int kp_maxseg;
	char kp_nomem;
	char kp_zero;
	char kp_pageable;
};

/*
 * Dynamic allocation parameters. Stuff that changes too often or too much
 * to create separate va and pa modes for.
 *
 * kd_waitok - is it ok to sleep?
 * kd_trylock - don't sleep on map locks.
 * kd_prefer - offset to feed to PMAP_PREFER
 * kd_slowdown - special parameter for the singlepage va allocator
 *  that tells the caller to sleep if possible to let the singlepage
 *  allocator catch up.
 */
struct kmem_dyn_mode {
	voff_t kd_prefer;
	int *kd_slowdown;
	char kd_waitok;
	char kd_trylock;
};

#define KMEM_DYN_INITIALIZER { UVM_UNKNOWN_OFFSET, NULL, 0, 0 }
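
/*
 * Illustrative only: KMEM_DYN_INITIALIZER fills the fields positionally
 * (kd_prefer, kd_slowdown, kd_waitok, kd_trylock).  A caller wanting a
 * sleeping allocation with a preferred offset could start from it and
 * override individual fields:
 *
 *	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
 *
 *	kd.kd_waitok = 1;
 *	kd.kd_prefer = preferred_offset;
 *
 * (preferred_offset is a placeholder, not a real symbol.)
 */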

/*
 * Notice that for the kv_ modes, waiting has a different meaning. It is
 * only supposed to be used for very space-constrained maps where waiting
 * is a way to throttle some other operation.
 * The exception is kv_page, which needs to wait relatively often.
 * All kv_ modes except kv_intrsafe will potentially sleep.
 */
extern const struct kmem_va_mode kv_any;
extern const struct kmem_va_mode kv_intrsafe;
extern const struct kmem_va_mode kv_page;

extern const struct kmem_pa_mode kp_dirty;
extern const struct kmem_pa_mode kp_zero;
extern const struct kmem_pa_mode kp_dma;
extern const struct kmem_pa_mode kp_dma_contig;
extern const struct kmem_pa_mode kp_dma_zero;
extern const struct kmem_pa_mode kp_pageable;
extern const struct kmem_pa_mode kp_none;

extern const struct kmem_dyn_mode kd_waitok;
extern const struct kmem_dyn_mode kd_nowait;
extern const struct kmem_dyn_mode kd_trylock;

void			*km_alloc(size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *,
			    const struct kmem_dyn_mode *);
void			km_free(void *, size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *);
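
/*
 * Illustrative sketch of the km_alloc()/km_free() interface using the
 * canned modes declared above (not a normative example):
 *
 *	void *va;
 *
 *	va = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	...
 *	km_free(va, PAGE_SIZE, &kv_any, &kp_zero);
 *
 * The kmem_va_mode and kmem_pa_mode passed to km_free() must match the
 * ones used for the corresponding km_alloc().
 */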
int			uvm_map(vm_map_t, vaddr_t *, vsize_t,
			    struct uvm_object *, voff_t, vsize_t, unsigned int);
int			uvm_mapanon(vm_map_t, vaddr_t *, vsize_t, vsize_t, unsigned int);
int			uvm_map_pageable(vm_map_t, vaddr_t,
			    vaddr_t, boolean_t, int);
int			uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t		uvm_map_checkprot(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t);
int			uvm_map_protect(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t, int etype, boolean_t, boolean_t);
struct vmspace		*uvmspace_alloc(vaddr_t, vaddr_t,
			    boolean_t, boolean_t);
void			uvmspace_init(struct vmspace *, struct pmap *,
			    vaddr_t, vaddr_t, boolean_t, boolean_t);
void			uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace		*uvmspace_fork(struct process *);
void			uvmspace_addref(struct vmspace *);
void			uvmspace_free(struct vmspace *);
struct vmspace		*uvmspace_share(struct process *);
int			uvm_share(vm_map_t, vaddr_t, vm_prot_t,
			    vm_map_t, vaddr_t, vsize_t);
int			uvm_sysctl(int *, u_int, void *, size_t *,
			    void *, size_t, struct proc *);
struct vm_page		*uvm_pagealloc(struct uvm_object *,
			    voff_t, struct vm_anon *, int);
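/*
 * Illustrative only: allocating a single zeroed page with no backing
 * object and no anon, and handling failure:
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		... wait for memory or fail ...
 */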
int			uvm_pagealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int);
void			uvm_pagerealloc(struct vm_page *,
			    struct uvm_object *, voff_t);
int			uvm_pagerealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int, struct uvm_constraint_range *);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void			uvm_page_physload(paddr_t, paddr_t, paddr_t,
			    paddr_t, int);
void			uvm_setpagesize(void);
void			uvm_shutdown(void);
void			uvm_aio_biodone(struct buf *);
void			uvm_aio_aiodone(struct buf *);
void			uvm_pageout(void *);
void			uvm_aiodone_daemon(void *);
void			uvm_wait(const char *);
int			uvm_pglistalloc(psize_t, paddr_t, paddr_t,
			    paddr_t, paddr_t, struct pglist *, int, int);
void			uvm_pglistfree(struct pglist *);
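/*
 * Illustrative only: uvm_pglistalloc() takes (size, low, high, alignment,
 * boundary, list, nsegs, flags).  A DMA-reachable, physically contiguous
 * allocation might look roughly like
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	error = uvm_pglistalloc(size, 0, dma_constraint.ucr_high, 0, 0,
 *	    &pgl, 1, UVM_PLA_WAITOK);
 *	...
 *	uvm_pglistfree(&pgl);
 *
 * (size and error are placeholder variables.)
 */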
void			uvm_pmr_use_inc(paddr_t, paddr_t);
void			uvm_swap_init(void);
typedef int		uvm_coredump_setup_cb(int _nsegment, void *_cookie);
typedef int		uvm_coredump_walk_cb(vaddr_t _start, vaddr_t _realend,
			    vaddr_t _end, vm_prot_t _prot, int _isvnode,
			    int _nsegment, void *_cookie);
int			uvm_coredump_walkmap(struct proc *_p,
			    uvm_coredump_setup_cb *_setup,
			    uvm_coredump_walk_cb *_walk, void *_cookie);
void			uvm_grow(struct proc *, vaddr_t);
void			uvm_pagezero_thread(void *);
void			kmeminit_nkmempages(void);
void			kmeminit(void);
extern u_int		nkmempages;

struct vnode;
struct uvm_object	*uvn_attach(struct vnode *, vm_prot_t);

struct process;
struct kinfo_vmentry;
int			fill_vmmap(struct process *, struct kinfo_vmentry *,
			    size_t *);

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */