vm_map.c revision 311516
1/*-
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53 *  School of Computer Science
54 *  Carnegie Mellon University
55 *  Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61/*
62 *	Virtual memory mapping module.
63 */
64
65#include <sys/cdefs.h>
66__FBSDID("$FreeBSD: stable/10/sys/vm/vm_map.c 311516 2017-01-06 12:13:03Z kib $");
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/mutex.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76#include <sys/mman.h>
77#include <sys/vnode.h>
78#include <sys/racct.h>
79#include <sys/resourcevar.h>
80#include <sys/rwlock.h>
81#include <sys/file.h>
82#include <sys/sysctl.h>
83#include <sys/sysent.h>
84#include <sys/shm.h>
85
86#include <vm/vm.h>
87#include <vm/vm_param.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_object.h>
92#include <vm/vm_pager.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_extern.h>
95#include <vm/vnode_pager.h>
96#include <vm/swap_pager.h>
97#include <vm/uma.h>
98
99/*
100 *	Virtual memory maps provide for the mapping, protection,
101 *	and sharing of virtual memory objects.  In addition,
102 *	this module provides for an efficient virtual copy of
103 *	memory from one map to another.
104 *
105 *	Synchronization is required prior to most operations.
106 *
107 *	Maps consist of an ordered doubly-linked list of simple
108 *	entries; a self-adjusting binary search tree of these
109 *	entries is used to speed up lookups.
110 *
111 *	Since portions of maps are specified by start/end addresses,
112 *	which may not align with existing map entries, all
113 *	routines merely "clip" entries to these start/end values.
114 *	[That is, an entry is split into two, bordering at a
115 *	start or end value.]  Note that these clippings may not
116 *	always be necessary (as the two resulting entries are then
117 *	not changed); however, the clipping is done for convenience.
118 *
119 *	As mentioned above, virtual copy operations are performed
120 *	by copying VM object references from one map to
121 *	another, and then marking both regions as copy-on-write.
122 */
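/*
 *	A small worked example of the clipping described above, using
 *	hypothetical addresses: if an entry spans [0x10000, 0x30000) and an
 *	operation targets [0x20000, 0x28000), vm_map_clip_start() first
 *	splits the entry into [0x10000, 0x20000) and [0x20000, 0x30000),
 *	and vm_map_clip_end() then splits the latter into [0x20000, 0x28000)
 *	and [0x28000, 0x30000), so the operation sees exactly one entry.
 */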
123
124static struct mtx map_sleep_mtx;
125static uma_zone_t mapentzone;
126static uma_zone_t kmapentzone;
127static uma_zone_t mapzone;
128static uma_zone_t vmspace_zone;
129static int vmspace_zinit(void *mem, int size, int flags);
130static int vm_map_zinit(void *mem, int size, int flags);
131static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
132    vm_offset_t max);
133static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
134static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
135static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
136#ifdef INVARIANTS
137static void vm_map_zdtor(void *mem, int size, void *arg);
138static void vmspace_zdtor(void *mem, int size, void *arg);
139#endif
140static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
141    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
142    int cow);
143static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
144    vm_offset_t failed_addr);
145
146#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
147    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
148     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
149
150/*
151 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
152 * stable.
153 */
154#define PROC_VMSPACE_LOCK(p) do { } while (0)
155#define PROC_VMSPACE_UNLOCK(p) do { } while (0)
156
157/*
158 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
159 *
160 *	Asserts that the starting and ending region
161 *	addresses fall within the valid range of the map.
162 */
163#define	VM_MAP_RANGE_CHECK(map, start, end)		\
164		{					\
165		if (start < vm_map_min(map))		\
166			start = vm_map_min(map);	\
167		if (end > vm_map_max(map))		\
168			end = vm_map_max(map);		\
169		if (start > end)			\
170			start = end;			\
171		}
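/*
 *	For example, with a hypothetical map whose valid range is
 *	[0x1000, 0xf0000000), a request of (start, end) = (0x0, 0xffffffff)
 *	is clamped to (0x1000, 0xf0000000), and a request lying entirely
 *	outside the map collapses to an empty range (start == end).
 */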
172
173/*
174 *	vm_map_startup:
175 *
176 *	Initialize the vm_map module.  Must be called before
177 *	any other vm_map routines.
178 *
179 *	Map and entry structures are allocated from the general
180 *	purpose memory pool with some exceptions:
181 *
182 *	- The kernel map and kmem submap are allocated statically.
183 *	- Kernel map entries are allocated out of a static pool.
184 *
185 *	These restrictions are necessary since malloc() uses the
186 *	maps and requires map entries.
187 */
188
189void
190vm_map_startup(void)
191{
192	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
193	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
194#ifdef INVARIANTS
195	    vm_map_zdtor,
196#else
197	    NULL,
198#endif
199	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
200	uma_prealloc(mapzone, MAX_KMAP);
201	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
202	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
203	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
204	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
205	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
206	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
207#ifdef INVARIANTS
208	    vmspace_zdtor,
209#else
210	    NULL,
211#endif
212	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
213}
214
215static int
216vmspace_zinit(void *mem, int size, int flags)
217{
218	struct vmspace *vm;
219
220	vm = (struct vmspace *)mem;
221
222	vm->vm_map.pmap = NULL;
223	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
224	PMAP_LOCK_INIT(vmspace_pmap(vm));
225	return (0);
226}
227
228static int
229vm_map_zinit(void *mem, int size, int flags)
230{
231	vm_map_t map;
232
233	map = (vm_map_t)mem;
234	memset(map, 0, sizeof(*map));
235	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
236	sx_init(&map->lock, "vm map (user)");
237	return (0);
238}
239
240#ifdef INVARIANTS
241static void
242vmspace_zdtor(void *mem, int size, void *arg)
243{
244	struct vmspace *vm;
245
246	vm = (struct vmspace *)mem;
247
248	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
249}
250static void
251vm_map_zdtor(void *mem, int size, void *arg)
252{
253	vm_map_t map;
254
255	map = (vm_map_t)mem;
256	KASSERT(map->nentries == 0,
257	    ("map %p nentries == %d on free.",
258	    map, map->nentries));
259	KASSERT(map->size == 0,
260	    ("map %p size == %lu on free.",
261	    map, (unsigned long)map->size));
262}
263#endif	/* INVARIANTS */
264
265/*
266 * Allocate a vmspace structure, including a vm_map and pmap,
267 * and initialize those structures.  The refcnt is set to 1.
268 *
269 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
270 */
271struct vmspace *
272vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
273{
274	struct vmspace *vm;
275
276	vm = uma_zalloc(vmspace_zone, M_WAITOK);
277
278	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
279
280	if (pinit == NULL)
281		pinit = &pmap_pinit;
282
283	if (!pinit(vmspace_pmap(vm))) {
284		uma_zfree(vmspace_zone, vm);
285		return (NULL);
286	}
287	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
288	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
289	vm->vm_refcnt = 1;
290	vm->vm_shm = NULL;
291	vm->vm_swrss = 0;
292	vm->vm_tsize = 0;
293	vm->vm_dsize = 0;
294	vm->vm_ssize = 0;
295	vm->vm_taddr = 0;
296	vm->vm_daddr = 0;
297	vm->vm_maxsaddr = 0;
298	return (vm);
299}
300
301#ifdef RACCT
302static void
303vmspace_container_reset(struct proc *p)
304{
305
306	PROC_LOCK(p);
307	racct_set(p, RACCT_DATA, 0);
308	racct_set(p, RACCT_STACK, 0);
309	racct_set(p, RACCT_RSS, 0);
310	racct_set(p, RACCT_MEMLOCK, 0);
311	racct_set(p, RACCT_VMEM, 0);
312	PROC_UNLOCK(p);
313}
314#endif
315
316static inline void
317vmspace_dofree(struct vmspace *vm)
318{
319
320	CTR1(KTR_VM, "vmspace_free: %p", vm);
321
322	/*
323	 * Make sure any SysV shm is freed, it might not have been in
324	 * exit1().
325	 */
326	shmexit(vm);
327
328	/*
329	 * Lock the map, to wait out all other references to it.
330	 * Delete all of the mappings and pages they hold, then call
331	 * the pmap module to reclaim anything left.
332	 */
333	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
334	    vm->vm_map.max_offset);
335
336	pmap_release(vmspace_pmap(vm));
337	vm->vm_map.pmap = NULL;
338	uma_zfree(vmspace_zone, vm);
339}
340
341void
342vmspace_free(struct vmspace *vm)
343{
344
345	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
346	    "vmspace_free() called with non-sleepable lock held");
347
348	if (vm->vm_refcnt == 0)
349		panic("vmspace_free: attempt to free already freed vmspace");
350
351	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
352		vmspace_dofree(vm);
353}
354
355void
356vmspace_exitfree(struct proc *p)
357{
358	struct vmspace *vm;
359
360	PROC_VMSPACE_LOCK(p);
361	vm = p->p_vmspace;
362	p->p_vmspace = NULL;
363	PROC_VMSPACE_UNLOCK(p);
364	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
365	vmspace_free(vm);
366}
367
368void
369vmspace_exit(struct thread *td)
370{
371	int refcnt;
372	struct vmspace *vm;
373	struct proc *p;
374
375	/*
376	 * Release user portion of address space.
377	 * This releases references to vnodes,
378	 * which could cause I/O if the file has been unlinked.
379	 * Need to do this early enough that we can still sleep.
380	 *
381	 * The last exiting process to reach this point releases as
382	 * much of the environment as it can. vmspace_dofree() is the
383	 * slower fallback in case another process had a temporary
384	 * reference to the vmspace.
385	 */
386
387	p = td->td_proc;
388	vm = p->p_vmspace;
389	atomic_add_int(&vmspace0.vm_refcnt, 1);
390	do {
391		refcnt = vm->vm_refcnt;
392		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
393			/* Switch now since other proc might free vmspace */
394			PROC_VMSPACE_LOCK(p);
395			p->p_vmspace = &vmspace0;
396			PROC_VMSPACE_UNLOCK(p);
397			pmap_activate(td);
398		}
399	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
400	if (refcnt == 1) {
401		if (p->p_vmspace != vm) {
402			/* vmspace not yet freed, switch back */
403			PROC_VMSPACE_LOCK(p);
404			p->p_vmspace = vm;
405			PROC_VMSPACE_UNLOCK(p);
406			pmap_activate(td);
407		}
408		pmap_remove_pages(vmspace_pmap(vm));
409		/* Switch now since this proc will free vmspace */
410		PROC_VMSPACE_LOCK(p);
411		p->p_vmspace = &vmspace0;
412		PROC_VMSPACE_UNLOCK(p);
413		pmap_activate(td);
414		vmspace_dofree(vm);
415	}
416#ifdef RACCT
417	if (racct_enable)
418		vmspace_container_reset(p);
419#endif
420}
421
422/* Acquire reference to vmspace owned by another process. */
423
424struct vmspace *
425vmspace_acquire_ref(struct proc *p)
426{
427	struct vmspace *vm;
428	int refcnt;
429
430	PROC_VMSPACE_LOCK(p);
431	vm = p->p_vmspace;
432	if (vm == NULL) {
433		PROC_VMSPACE_UNLOCK(p);
434		return (NULL);
435	}
436	do {
437		refcnt = vm->vm_refcnt;
438		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
439			PROC_VMSPACE_UNLOCK(p);
440			return (NULL);
441		}
442	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
443	if (vm != p->p_vmspace) {
444		PROC_VMSPACE_UNLOCK(p);
445		vmspace_free(vm);
446		return (NULL);
447	}
448	PROC_VMSPACE_UNLOCK(p);
449	return (vm);
450}
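/*
 * A minimal usage sketch for vmspace_acquire_ref() (hypothetical caller;
 * the error value is only an example), showing the required pairing with
 * vmspace_free():
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	... inspect vm->vm_map, e.g. under vm_map_lock_read() ...
 *	vmspace_free(vm);
 */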
451
452void
453_vm_map_lock(vm_map_t map, const char *file, int line)
454{
455
456	if (map->system_map)
457		mtx_lock_flags_(&map->system_mtx, 0, file, line);
458	else
459		sx_xlock_(&map->lock, file, line);
460	map->timestamp++;
461}
462
463static void
464vm_map_process_deferred(void)
465{
466	struct thread *td;
467	vm_map_entry_t entry, next;
468	vm_object_t object;
469
470	td = curthread;
471	entry = td->td_map_def_user;
472	td->td_map_def_user = NULL;
473	while (entry != NULL) {
474		next = entry->next;
475		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
476			/*
477			 * Decrement the object's writemappings and
478			 * possibly the vnode's v_writecount.
479			 */
480			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
481			    ("Submap with writecount"));
482			object = entry->object.vm_object;
483			KASSERT(object != NULL, ("No object for writecount"));
484			vnode_pager_release_writecount(object, entry->start,
485			    entry->end);
486		}
487		vm_map_entry_deallocate(entry, FALSE);
488		entry = next;
489	}
490}
491
492void
493_vm_map_unlock(vm_map_t map, const char *file, int line)
494{
495
496	if (map->system_map)
497		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
498	else {
499		sx_xunlock_(&map->lock, file, line);
500		vm_map_process_deferred();
501	}
502}
503
504void
505_vm_map_lock_read(vm_map_t map, const char *file, int line)
506{
507
508	if (map->system_map)
509		mtx_lock_flags_(&map->system_mtx, 0, file, line);
510	else
511		sx_slock_(&map->lock, file, line);
512}
513
514void
515_vm_map_unlock_read(vm_map_t map, const char *file, int line)
516{
517
518	if (map->system_map)
519		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
520	else {
521		sx_sunlock_(&map->lock, file, line);
522		vm_map_process_deferred();
523	}
524}
525
526int
527_vm_map_trylock(vm_map_t map, const char *file, int line)
528{
529	int error;
530
531	error = map->system_map ?
532	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
533	    !sx_try_xlock_(&map->lock, file, line);
534	if (error == 0)
535		map->timestamp++;
536	return (error == 0);
537}
538
539int
540_vm_map_trylock_read(vm_map_t map, const char *file, int line)
541{
542	int error;
543
544	error = map->system_map ?
545	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
546	    !sx_try_slock_(&map->lock, file, line);
547	return (error == 0);
548}
549
550/*
551 *	_vm_map_lock_upgrade:	[ internal use only ]
552 *
553 *	Tries to upgrade a read (shared) lock on the specified map to a write
554 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
555 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
556 *	returned without a read or write lock held.
557 *
558 *	Requires that the map be read locked.
559 */
560int
561_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
562{
563	unsigned int last_timestamp;
564
565	if (map->system_map) {
566		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
567	} else {
568		if (!sx_try_upgrade_(&map->lock, file, line)) {
569			last_timestamp = map->timestamp;
570			sx_sunlock_(&map->lock, file, line);
571			vm_map_process_deferred();
572			/*
573			 * If the map's timestamp does not change while the
574			 * map is unlocked, then the upgrade succeeds.
575			 */
576			sx_xlock_(&map->lock, file, line);
577			if (last_timestamp != map->timestamp) {
578				sx_xunlock_(&map->lock, file, line);
579				return (1);
580			}
581		}
582	}
583	map->timestamp++;
584	return (0);
585}
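/*
 * A minimal sketch of the retry idiom a caller of vm_map_lock_upgrade()
 * might use (hypothetical caller code):
 *
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		(The map was dropped; other threads may have changed it.)
 *		vm_map_lock(map);
 *		if (!vm_map_lookup_entry(map, addr, &entry))
 *			(handle the entry having disappeared)
 *	}
 *	(the write lock is held here in either case)
 */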
586
587void
588_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
589{
590
591	if (map->system_map) {
592		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
593	} else
594		sx_downgrade_(&map->lock, file, line);
595}
596
597/*
598 *	vm_map_locked:
599 *
600 *	Returns a non-zero value if the caller holds a write (exclusive) lock
601 *	on the specified map and the value "0" otherwise.
602 */
603int
604vm_map_locked(vm_map_t map)
605{
606
607	if (map->system_map)
608		return (mtx_owned(&map->system_mtx));
609	else
610		return (sx_xlocked(&map->lock));
611}
612
613#ifdef INVARIANTS
614static void
615_vm_map_assert_locked(vm_map_t map, const char *file, int line)
616{
617
618	if (map->system_map)
619		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
620	else
621		sx_assert_(&map->lock, SA_XLOCKED, file, line);
622}
623
624#define	VM_MAP_ASSERT_LOCKED(map) \
625    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
626#else
627#define	VM_MAP_ASSERT_LOCKED(map)
628#endif
629
630/*
631 *	_vm_map_unlock_and_wait:
632 *
633 *	Atomically releases the lock on the specified map and puts the calling
634 *	thread to sleep.  The calling thread will remain asleep until either
635 *	vm_map_wakeup() is performed on the map or the specified timeout is
636 *	exceeded.
637 *
638 *	WARNING!  This function does not perform deferred deallocations of
639 *	objects and map	entries.  Therefore, the calling thread is expected to
640 *	reacquire the map lock after reawakening and later perform an ordinary
641 *	unlock operation, such as vm_map_unlock(), before completing its
642 *	operation on the map.
643 */
644int
645_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
646{
647
648	mtx_lock(&map_sleep_mtx);
649	if (map->system_map)
650		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
651	else
652		sx_xunlock_(&map->lock, file, line);
653	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
654	    timo));
655}
656
657/*
658 *	vm_map_wakeup:
659 *
660 *	Awaken any threads that have slept on the map using
661 *	vm_map_unlock_and_wait().
662 */
663void
664vm_map_wakeup(vm_map_t map)
665{
666
667	/*
668	 * Acquire and release map_sleep_mtx to prevent a wakeup()
669	 * from being performed (and lost) between the map unlock
670	 * and the msleep() in _vm_map_unlock_and_wait().
671	 */
672	mtx_lock(&map_sleep_mtx);
673	mtx_unlock(&map_sleep_mtx);
674	wakeup(&map->root);
675}
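/*
 * A minimal sketch of the sleeping side of this handshake (hypothetical
 * caller; the condition is a placeholder):
 *
 *	while (!condition) {
 *		map->needs_wakeup = TRUE;
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);	(reacquire before re-checking)
 *	}
 *	...
 *	vm_map_unlock(map);		(ordinary unlock, runs deferred work)
 */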
676
677void
678vm_map_busy(vm_map_t map)
679{
680
681	VM_MAP_ASSERT_LOCKED(map);
682	map->busy++;
683}
684
685void
686vm_map_unbusy(vm_map_t map)
687{
688
689	VM_MAP_ASSERT_LOCKED(map);
690	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
691	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
692		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
693		wakeup(&map->busy);
694	}
695}
696
697void
698vm_map_wait_busy(vm_map_t map)
699{
700
701	VM_MAP_ASSERT_LOCKED(map);
702	while (map->busy) {
703		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
704		if (map->system_map)
705			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
706		else
707			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
708	}
709	map->timestamp++;
710}
711
712long
713vmspace_resident_count(struct vmspace *vmspace)
714{
715	return pmap_resident_count(vmspace_pmap(vmspace));
716}
717
718/*
719 *	vm_map_create:
720 *
721 *	Creates and returns a new empty VM map with
722 *	the given physical map structure, and having
723 *	the given lower and upper address bounds.
724 */
725vm_map_t
726vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
727{
728	vm_map_t result;
729
730	result = uma_zalloc(mapzone, M_WAITOK);
731	CTR1(KTR_VM, "vm_map_create: %p", result);
732	_vm_map_init(result, pmap, min, max);
733	return (result);
734}
735
736/*
737 * Initialize an existing vm_map structure
738 * such as that in the vmspace structure.
739 */
740static void
741_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
742{
743
744	map->header.next = map->header.prev = &map->header;
745	map->needs_wakeup = FALSE;
746	map->system_map = 0;
747	map->pmap = pmap;
748	map->min_offset = min;
749	map->max_offset = max;
750	map->flags = 0;
751	map->root = NULL;
752	map->timestamp = 0;
753	map->busy = 0;
754}
755
756void
757vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
758{
759
760	_vm_map_init(map, pmap, min, max);
761	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
762	sx_init(&map->lock, "user map");
763}
764
765/*
766 *	vm_map_entry_dispose:	[ internal use only ]
767 *
768 *	Inverse of vm_map_entry_create.
769 */
770static void
771vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
772{
773	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
774}
775
776/*
777 *	vm_map_entry_create:	[ internal use only ]
778 *
779 *	Allocates a VM map entry for insertion.
780 *	No entry fields are filled in.
781 */
782static vm_map_entry_t
783vm_map_entry_create(vm_map_t map)
784{
785	vm_map_entry_t new_entry;
786
787	if (map->system_map)
788		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
789	else
790		new_entry = uma_zalloc(mapentzone, M_WAITOK);
791	if (new_entry == NULL)
792		panic("vm_map_entry_create: kernel resources exhausted");
793	return (new_entry);
794}
795
796/*
797 *	vm_map_entry_set_behavior:
798 *
799 *	Set the expected access behavior, either normal, random, or
800 *	sequential.
801 */
802static inline void
803vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
804{
805	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
806	    (behavior & MAP_ENTRY_BEHAV_MASK);
807}
808
809/*
810 *	vm_map_entry_set_max_free:
811 *
812 *	Set the max_free field in a vm_map_entry.
813 */
814static inline void
815vm_map_entry_set_max_free(vm_map_entry_t entry)
816{
817
818	entry->max_free = entry->adj_free;
819	if (entry->left != NULL && entry->left->max_free > entry->max_free)
820		entry->max_free = entry->left->max_free;
821	if (entry->right != NULL && entry->right->max_free > entry->max_free)
822		entry->max_free = entry->right->max_free;
823}
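/*
 *	A small worked example with hypothetical values: if this entry has
 *	an adj_free of 2 pages, a left subtree whose max_free is 5 pages,
 *	and a right subtree whose max_free is 3 pages, then its max_free
 *	becomes 5 pages, i.e. the largest free gap anywhere in the subtree
 *	rooted here.  vm_map_findspace() relies on this invariant to prune
 *	its first-fit search.
 */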
824
825/*
826 *	vm_map_entry_splay:
827 *
828 *	The Sleator and Tarjan top-down splay algorithm with the
829 *	following variation.  Max_free must be computed bottom-up, so
830 *	on the downward pass, maintain the left and right spines in
831 *	reverse order.  Then, make a second pass up each side to fix
832 *	the pointers and compute max_free.  The time bound is O(log n)
833 *	amortized.
834 *
835 *	The new root is the vm_map_entry containing "addr", or else an
836 *	adjacent entry (lower or higher) if addr is not in the tree.
837 *
838 *	The map must be locked, and leaves it so.
839 *
840 *	Returns: the new root.
841 */
842static vm_map_entry_t
843vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
844{
845	vm_map_entry_t llist, rlist;
846	vm_map_entry_t ltree, rtree;
847	vm_map_entry_t y;
848
849	/* Special case of empty tree. */
850	if (root == NULL)
851		return (root);
852
853	/*
854	 * Pass One: Splay down the tree until we find addr or a NULL
855	 * pointer where addr would go.  llist and rlist are the two
856	 * sides in reverse order (bottom-up), with llist linked by
857	 * the right pointer and rlist linked by the left pointer in
858	 * the vm_map_entry.  Wait until Pass Two to set max_free on
859	 * the two spines.
860	 */
861	llist = NULL;
862	rlist = NULL;
863	for (;;) {
864		/* root is never NULL in here. */
865		if (addr < root->start) {
866			y = root->left;
867			if (y == NULL)
868				break;
869			if (addr < y->start && y->left != NULL) {
870				/* Rotate right and put y on rlist. */
871				root->left = y->right;
872				y->right = root;
873				vm_map_entry_set_max_free(root);
874				root = y->left;
875				y->left = rlist;
876				rlist = y;
877			} else {
878				/* Put root on rlist. */
879				root->left = rlist;
880				rlist = root;
881				root = y;
882			}
883		} else if (addr >= root->end) {
884			y = root->right;
885			if (y == NULL)
886				break;
887			if (addr >= y->end && y->right != NULL) {
888				/* Rotate left and put y on llist. */
889				root->right = y->left;
890				y->left = root;
891				vm_map_entry_set_max_free(root);
892				root = y->right;
893				y->right = llist;
894				llist = y;
895			} else {
896				/* Put root on llist. */
897				root->right = llist;
898				llist = root;
899				root = y;
900			}
901		} else
902			break;
903	}
904
905	/*
906	 * Pass Two: Walk back up the two spines, flip the pointers
907	 * and set max_free.  The subtrees of the root go at the
908	 * bottom of llist and rlist.
909	 */
910	ltree = root->left;
911	while (llist != NULL) {
912		y = llist->right;
913		llist->right = ltree;
914		vm_map_entry_set_max_free(llist);
915		ltree = llist;
916		llist = y;
917	}
918	rtree = root->right;
919	while (rlist != NULL) {
920		y = rlist->left;
921		rlist->left = rtree;
922		vm_map_entry_set_max_free(rlist);
923		rtree = rlist;
924		rlist = y;
925	}
926
927	/*
928	 * Final assembly: add ltree and rtree as subtrees of root.
929	 */
930	root->left = ltree;
931	root->right = rtree;
932	vm_map_entry_set_max_free(root);
933
934	return (root);
935}
936
937/*
938 *	vm_map_entry_{un,}link:
939 *
940 *	Insert/remove entries from maps.
941 */
942static void
943vm_map_entry_link(vm_map_t map,
944		  vm_map_entry_t after_where,
945		  vm_map_entry_t entry)
946{
947
948	CTR4(KTR_VM,
949	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
950	    map->nentries, entry, after_where);
951	VM_MAP_ASSERT_LOCKED(map);
952	KASSERT(after_where == &map->header ||
953	    after_where->end <= entry->start,
954	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
955	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
956	KASSERT(after_where->next == &map->header ||
957	    entry->end <= after_where->next->start,
958	    ("vm_map_entry_link: new end %jx next start %jx overlap",
959	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
960
961	map->nentries++;
962	entry->prev = after_where;
963	entry->next = after_where->next;
964	entry->next->prev = entry;
965	after_where->next = entry;
966
967	if (after_where != &map->header) {
968		if (after_where != map->root)
969			vm_map_entry_splay(after_where->start, map->root);
970		entry->right = after_where->right;
971		entry->left = after_where;
972		after_where->right = NULL;
973		after_where->adj_free = entry->start - after_where->end;
974		vm_map_entry_set_max_free(after_where);
975	} else {
976		entry->right = map->root;
977		entry->left = NULL;
978	}
979	entry->adj_free = (entry->next == &map->header ? map->max_offset :
980	    entry->next->start) - entry->end;
981	vm_map_entry_set_max_free(entry);
982	map->root = entry;
983}
984
985static void
986vm_map_entry_unlink(vm_map_t map,
987		    vm_map_entry_t entry)
988{
989	vm_map_entry_t next, prev, root;
990
991	VM_MAP_ASSERT_LOCKED(map);
992	if (entry != map->root)
993		vm_map_entry_splay(entry->start, map->root);
994	if (entry->left == NULL)
995		root = entry->right;
996	else {
997		root = vm_map_entry_splay(entry->start, entry->left);
998		root->right = entry->right;
999		root->adj_free = (entry->next == &map->header ? map->max_offset :
1000		    entry->next->start) - root->end;
1001		vm_map_entry_set_max_free(root);
1002	}
1003	map->root = root;
1004
1005	prev = entry->prev;
1006	next = entry->next;
1007	next->prev = prev;
1008	prev->next = next;
1009	map->nentries--;
1010	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1011	    map->nentries, entry);
1012}
1013
1014/*
1015 *	vm_map_entry_resize_free:
1016 *
1017 *	Recompute the amount of free space following a vm_map_entry
1018 *	and propagate that value up the tree.  Call this function after
1019 *	resizing a map entry in-place, that is, without a call to
1020 *	vm_map_entry_link() or _unlink().
1021 *
1022 *	The map must be locked, and leaves it so.
1023 */
1024static void
1025vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1026{
1027
1028	/*
1029	 * Using splay trees without parent pointers, propagating
1030	 * max_free up the tree is done by moving the entry to the
1031	 * root and making the change there.
1032	 */
1033	if (entry != map->root)
1034		map->root = vm_map_entry_splay(entry->start, map->root);
1035
1036	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1037	    entry->next->start) - entry->end;
1038	vm_map_entry_set_max_free(entry);
1039}
1040
1041/*
1042 *	vm_map_lookup_entry:	[ internal use only ]
1043 *
1044 *	Finds the map entry containing (or
1045 *	immediately preceding) the specified address
1046 *	in the given map; the entry is returned
1047 *	in the "entry" parameter.  The boolean
1048 *	result indicates whether the address is
1049 *	actually contained in the map.
1050 */
1051boolean_t
1052vm_map_lookup_entry(
1053	vm_map_t map,
1054	vm_offset_t address,
1055	vm_map_entry_t *entry)	/* OUT */
1056{
1057	vm_map_entry_t cur;
1058	boolean_t locked;
1059
1060	/*
1061	 * If the map is empty, then the map entry immediately preceding
1062	 * "address" is the map's header.
1063	 */
1064	cur = map->root;
1065	if (cur == NULL)
1066		*entry = &map->header;
1067	else if (address >= cur->start && cur->end > address) {
1068		*entry = cur;
1069		return (TRUE);
1070	} else if ((locked = vm_map_locked(map)) ||
1071	    sx_try_upgrade(&map->lock)) {
1072		/*
1073		 * Splay requires a write lock on the map.  However, it only
1074		 * restructures the binary search tree; it does not otherwise
1075		 * change the map.  Thus, the map's timestamp need not change
1076		 * on a temporary upgrade.
1077		 */
1078		map->root = cur = vm_map_entry_splay(address, cur);
1079		if (!locked)
1080			sx_downgrade(&map->lock);
1081
1082		/*
1083		 * If "address" is contained within a map entry, the new root
1084		 * is that map entry.  Otherwise, the new root is a map entry
1085		 * immediately before or after "address".
1086		 */
1087		if (address >= cur->start) {
1088			*entry = cur;
1089			if (cur->end > address)
1090				return (TRUE);
1091		} else
1092			*entry = cur->prev;
1093	} else
1094		/*
1095		 * Since the map is only locked for read access, perform a
1096		 * standard binary search tree lookup for "address".
1097		 */
1098		for (;;) {
1099			if (address < cur->start) {
1100				if (cur->left == NULL) {
1101					*entry = cur->prev;
1102					break;
1103				}
1104				cur = cur->left;
1105			} else if (cur->end > address) {
1106				*entry = cur;
1107				return (TRUE);
1108			} else {
1109				if (cur->right == NULL) {
1110					*entry = cur;
1111					break;
1112				}
1113				cur = cur->right;
1114			}
1115		}
1116	return (FALSE);
1117}
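/*
 * A minimal usage sketch for vm_map_lookup_entry() (hypothetical caller),
 * showing how the boolean result and the OUT parameter are consumed:
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		("addr" lies inside "entry")
 *	} else {
 *		("entry" is the entry, or the header, just before "addr";
 *		 entry->next->start bounds the free gap containing "addr")
 *	}
 *	vm_map_unlock_read(map);
 */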
1118
1119/*
1120 *	vm_map_insert:
1121 *
1122 *	Inserts the given whole VM object into the target
1123 *	map at the specified address range.  The object's
1124 *	size should match that of the address range.
1125 *
1126 *	Requires that the map be locked, and leaves it so.
1127 *
1128 *	If object is non-NULL, ref count must be bumped by caller
1129 *	prior to making call to account for the new entry.
1130 */
1131int
1132vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1133	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1134	      int cow)
1135{
1136	vm_map_entry_t new_entry;
1137	vm_map_entry_t prev_entry;
1138	vm_map_entry_t temp_entry;
1139	vm_eflags_t protoeflags;
1140	struct ucred *cred;
1141	vm_inherit_t inheritance;
1142	boolean_t charge_prev_obj;
1143
1144	VM_MAP_ASSERT_LOCKED(map);
1145
1146	/*
1147	 * Check that the start and end points are not bogus.
1148	 */
1149	if ((start < map->min_offset) || (end > map->max_offset) ||
1150	    (start >= end))
1151		return (KERN_INVALID_ADDRESS);
1152
1153	/*
1154	 * Find the entry prior to the proposed starting address; if it's part
1155	 * of an existing entry, this range is bogus.
1156	 */
1157	if (vm_map_lookup_entry(map, start, &temp_entry))
1158		return (KERN_NO_SPACE);
1159
1160	prev_entry = temp_entry;
1161
1162	/*
1163	 * Assert that the next entry doesn't overlap the end point.
1164	 */
1165	if ((prev_entry->next != &map->header) &&
1166	    (prev_entry->next->start < end))
1167		return (KERN_NO_SPACE);
1168
1169	protoeflags = 0;
1170	charge_prev_obj = FALSE;
1171
1172	if (cow & MAP_COPY_ON_WRITE)
1173		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1174
1175	if (cow & MAP_NOFAULT) {
1176		protoeflags |= MAP_ENTRY_NOFAULT;
1177
1178		KASSERT(object == NULL,
1179			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1180	}
1181	if (cow & MAP_DISABLE_SYNCER)
1182		protoeflags |= MAP_ENTRY_NOSYNC;
1183	if (cow & MAP_DISABLE_COREDUMP)
1184		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1185	if (cow & MAP_VN_WRITECOUNT)
1186		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1187	if (cow & MAP_INHERIT_SHARE)
1188		inheritance = VM_INHERIT_SHARE;
1189	else
1190		inheritance = VM_INHERIT_DEFAULT;
1191
1192	cred = NULL;
1193	KASSERT((object != kmem_object && object != kernel_object) ||
1194	    ((object == kmem_object || object == kernel_object) &&
1195		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1196	    ("kmem or kernel object and cow"));
1197	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1198		goto charged;
1199	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1200	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1201		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1202			return (KERN_RESOURCE_SHORTAGE);
1203		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1204		    object->cred == NULL,
1205		    ("OVERCOMMIT: vm_map_insert o %p", object));
1206		cred = curthread->td_ucred;
1207		crhold(cred);
1208		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1209			charge_prev_obj = TRUE;
1210	}
1211
1212charged:
1213	/* Expand the kernel pmap, if necessary. */
1214	if (map == kernel_map && end > kernel_vm_end)
1215		pmap_growkernel(end);
1216	if (object != NULL) {
1217		/*
1218		 * OBJ_ONEMAPPING must be cleared unless this mapping
1219		 * is trivially proven to be the only mapping for any
1220		 * of the object's pages.  (Object granularity
1221		 * reference counting is insufficient to recognize
1222		 * aliases with precision.)
1223		 */
1224		VM_OBJECT_WLOCK(object);
1225		if (object->ref_count > 1 || object->shadow_count != 0)
1226			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1227		VM_OBJECT_WUNLOCK(object);
1228	}
1229	else if ((prev_entry != &map->header) &&
1230		 (prev_entry->eflags == protoeflags) &&
1231		 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
1232		 (prev_entry->end == start) &&
1233		 (prev_entry->wired_count == 0) &&
1234		 (prev_entry->cred == cred ||
1235		  (prev_entry->object.vm_object != NULL &&
1236		   (prev_entry->object.vm_object->cred == cred))) &&
1237		   vm_object_coalesce(prev_entry->object.vm_object,
1238		       prev_entry->offset,
1239		       (vm_size_t)(prev_entry->end - prev_entry->start),
1240		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1241		/*
1242		 * We were able to extend the object.  Determine if we
1243		 * can extend the previous map entry to include the
1244		 * new range as well.
1245		 */
1246		if ((prev_entry->inheritance == inheritance) &&
1247		    (prev_entry->protection == prot) &&
1248		    (prev_entry->max_protection == max)) {
1249			map->size += (end - prev_entry->end);
1250			prev_entry->end = end;
1251			vm_map_entry_resize_free(map, prev_entry);
1252			vm_map_simplify_entry(map, prev_entry);
1253			if (cred != NULL)
1254				crfree(cred);
1255			return (KERN_SUCCESS);
1256		}
1257
1258		/*
1259		 * If we can extend the object but cannot extend the
1260		 * map entry, we have to create a new map entry.  We
1261		 * must bump the ref count on the extended object to
1262		 * account for it.  object may be NULL.
1263		 */
1264		object = prev_entry->object.vm_object;
1265		offset = prev_entry->offset +
1266			(prev_entry->end - prev_entry->start);
1267		vm_object_reference(object);
1268		if (cred != NULL && object != NULL && object->cred != NULL &&
1269		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1270			/* Object already accounts for this uid. */
1271			crfree(cred);
1272			cred = NULL;
1273		}
1274	}
1275
1276	/*
1277	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1278	 * in things like the buffer map where we manage kva but do not manage
1279	 * backing objects.
1280	 */
1281
1282	/*
1283	 * Create a new entry
1284	 */
1285	new_entry = vm_map_entry_create(map);
1286	new_entry->start = start;
1287	new_entry->end = end;
1288	new_entry->cred = NULL;
1289
1290	new_entry->eflags = protoeflags;
1291	new_entry->object.vm_object = object;
1292	new_entry->offset = offset;
1293	new_entry->avail_ssize = 0;
1294
1295	new_entry->inheritance = inheritance;
1296	new_entry->protection = prot;
1297	new_entry->max_protection = max;
1298	new_entry->wired_count = 0;
1299	new_entry->wiring_thread = NULL;
1300	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1301	new_entry->next_read = OFF_TO_IDX(offset);
1302
1303	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1304	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1305	new_entry->cred = cred;
1306
1307	/*
1308	 * Insert the new entry into the list
1309	 */
1310	vm_map_entry_link(map, prev_entry, new_entry);
1311	map->size += new_entry->end - new_entry->start;
1312
1313	/*
1314	 * It may be possible to merge the new entry with the next and/or
1315	 * previous entries.  However, due to MAP_STACK_* being a hack, a
1316	 * panic can result from merging such entries.
1317	 */
1318	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
1319		vm_map_simplify_entry(map, new_entry);
1320
1321	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1322		vm_map_pmap_enter(map, start, prot,
1323				    object, OFF_TO_IDX(offset), end - start,
1324				    cow & MAP_PREFAULT_PARTIAL);
1325	}
1326
1327	return (KERN_SUCCESS);
1328}
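/*
 * A minimal sketch of the caller contract noted above (hypothetical
 * caller; "object", "offset", "start" and "end" are placeholders):
 *
 *	vm_object_reference(object);	(caller supplies the reference)
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, offset, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);	(drop the unused reference)
 */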
1329
1330/*
1331 *	vm_map_findspace:
1332 *
1333 *	Find the first fit (lowest VM address) for "length" free bytes
1334 *	beginning at address >= start in the given map.
1335 *
1336 *	In a vm_map_entry, "adj_free" is the amount of free space
1337 *	adjacent (higher address) to this entry, and "max_free" is the
1338 *	maximum amount of contiguous free space in its subtree.  This
1339 *	allows finding a free region in one path down the tree, so
1340 *	O(log n) amortized with splay trees.
1341 *
1342 *	The map must be locked, and leaves it so.
1343 *
1344 *	Returns: 0 on success, and starting address in *addr,
1345 *		 1 if insufficient space.
1346 */
1347int
1348vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1349    vm_offset_t *addr)	/* OUT */
1350{
1351	vm_map_entry_t entry;
1352	vm_offset_t st;
1353
1354	/*
1355	 * Request must fit within min/max VM address and must avoid
1356	 * address wrap.
1357	 */
1358	if (start < map->min_offset)
1359		start = map->min_offset;
1360	if (start + length > map->max_offset || start + length < start)
1361		return (1);
1362
1363	/* Empty tree means wide open address space. */
1364	if (map->root == NULL) {
1365		*addr = start;
1366		return (0);
1367	}
1368
1369	/*
1370	 * After splay, if start comes before root node, then there
1371	 * must be a gap from start to the root.
1372	 */
1373	map->root = vm_map_entry_splay(start, map->root);
1374	if (start + length <= map->root->start) {
1375		*addr = start;
1376		return (0);
1377	}
1378
1379	/*
1380	 * Root is the last node that might begin its gap before
1381	 * start, and this is the last comparison where address
1382	 * wrap might be a problem.
1383	 */
1384	st = (start > map->root->end) ? start : map->root->end;
1385	if (length <= map->root->end + map->root->adj_free - st) {
1386		*addr = st;
1387		return (0);
1388	}
1389
1390	/* With max_free, can immediately tell if no solution. */
1391	entry = map->root->right;
1392	if (entry == NULL || length > entry->max_free)
1393		return (1);
1394
1395	/*
1396	 * Search the right subtree in the order: left subtree, root,
1397	 * right subtree (first fit).  The previous splay implies that
1398	 * all regions in the right subtree have addresses > start.
1399	 */
1400	while (entry != NULL) {
1401		if (entry->left != NULL && entry->left->max_free >= length)
1402			entry = entry->left;
1403		else if (entry->adj_free >= length) {
1404			*addr = entry->end;
1405			return (0);
1406		} else
1407			entry = entry->right;
1408	}
1409
1410	/* Can't get here, so panic if we do. */
1411	panic("vm_map_findspace: max_free corrupt");
1412}
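/*
 * A minimal usage sketch combining vm_map_findspace() with vm_map_insert()
 * under a single hold of the map lock, so the discovered gap cannot be
 * claimed by another thread in between (hypothetical caller; this is
 * essentially what vm_map_find() below does):
 *
 *	vm_map_lock(map);
 *	if (vm_map_findspace(map, start, length, &addr) == 0)
 *		rv = vm_map_insert(map, NULL, 0, addr, addr + length,
 *		    prot, max, 0);
 *	else
 *		rv = KERN_NO_SPACE;
 *	vm_map_unlock(map);
 */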
1413
1414int
1415vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1416    vm_offset_t start, vm_size_t length, vm_prot_t prot,
1417    vm_prot_t max, int cow)
1418{
1419	vm_offset_t end;
1420	int result;
1421
1422	end = start + length;
1423	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1424	    object == NULL,
1425	    ("vm_map_fixed: non-NULL backing object for stack"));
1426	vm_map_lock(map);
1427	VM_MAP_RANGE_CHECK(map, start, end);
1428	if ((cow & MAP_CHECK_EXCL) == 0)
1429		vm_map_delete(map, start, end);
1430	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1431		result = vm_map_stack_locked(map, start, length, sgrowsiz,
1432		    prot, max, cow);
1433	} else {
1434		result = vm_map_insert(map, object, offset, start, end,
1435		    prot, max, cow);
1436	}
1437	vm_map_unlock(map);
1438	return (result);
1439}
1440
1441/*
1442 *	vm_map_find finds an unallocated region in the target address
1443 *	map with the given length.  The search is defined to be
1444 *	first-fit from the specified address; the region found is
1445 *	returned in the same parameter.
1446 *
1447 *	If object is non-NULL, ref count must be bumped by caller
1448 *	prior to making call to account for the new entry.
1449 */
1450int
1451vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1452	    vm_offset_t *addr,	/* IN/OUT */
1453	    vm_size_t length, vm_offset_t max_addr, int find_space,
1454	    vm_prot_t prot, vm_prot_t max, int cow)
1455{
1456	vm_offset_t alignment, initial_addr, start;
1457	int result;
1458
1459	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1460	    object == NULL,
1461	    ("vm_map_find: non-NULL backing object for stack"));
1462	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1463	    (object->flags & OBJ_COLORED) == 0))
1464		find_space = VMFS_ANY_SPACE;
1465	if (find_space >> 8 != 0) {
1466		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1467		alignment = (vm_offset_t)1 << (find_space >> 8);
1468	} else
1469		alignment = 0;
1470	initial_addr = *addr;
1471again:
1472	start = initial_addr;
1473	vm_map_lock(map);
1474	do {
1475		if (find_space != VMFS_NO_SPACE) {
1476			if (vm_map_findspace(map, start, length, addr) ||
1477			    (max_addr != 0 && *addr + length > max_addr)) {
1478				vm_map_unlock(map);
1479				if (find_space == VMFS_OPTIMAL_SPACE) {
1480					find_space = VMFS_ANY_SPACE;
1481					goto again;
1482				}
1483				return (KERN_NO_SPACE);
1484			}
1485			switch (find_space) {
1486			case VMFS_SUPER_SPACE:
1487			case VMFS_OPTIMAL_SPACE:
1488				pmap_align_superpage(object, offset, addr,
1489				    length);
1490				break;
1491			case VMFS_ANY_SPACE:
1492				break;
1493			default:
1494				if ((*addr & (alignment - 1)) != 0) {
1495					*addr &= ~(alignment - 1);
1496					*addr += alignment;
1497				}
1498				break;
1499			}
1500
1501			start = *addr;
1502		}
1503		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1504			result = vm_map_stack_locked(map, start, length,
1505			    sgrowsiz, prot, max, cow);
1506		} else {
1507			result = vm_map_insert(map, object, offset, start,
1508			    start + length, prot, max, cow);
1509		}
1510	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1511	    find_space != VMFS_ANY_SPACE);
1512	vm_map_unlock(map);
1513	return (result);
1514}
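/*
 * As the decoding above shows, "find_space" doubles as an alignment
 * request: values above 0xff carry log2(alignment) in the upper bits.
 * A brief sketch of a caller asking for a 2MB-aligned range (hypothetical
 * call, assuming the VMFS_ALIGNED_SPACE() encoding from vm_map.h):
 *
 *	rv = vm_map_find(map, object, offset, &addr, size, 0,
 *	    VMFS_ALIGNED_SPACE(21), VM_PROT_ALL, VM_PROT_ALL, 0);
 */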
1515
1516/*
1517 *	vm_map_simplify_entry:
1518 *
1519 *	Simplify the given map entry by merging with either neighbor.  This
1520 *	routine also has the ability to merge with both neighbors.
1521 *
1522 *	The map must be locked.
1523 *
1524 *	This routine guarantees that the passed entry remains valid (though
1525 *	possibly extended).  When merging, this routine may delete one or
1526 *	both neighbors.
1527 */
1528void
1529vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1530{
1531	vm_map_entry_t next, prev;
1532	vm_size_t prevsize, esize;
1533
1534	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1535		return;
1536
1537	prev = entry->prev;
1538	if (prev != &map->header) {
1539		prevsize = prev->end - prev->start;
1540		if ( (prev->end == entry->start) &&
1541		     (prev->object.vm_object == entry->object.vm_object) &&
1542		     (!prev->object.vm_object ||
1543			(prev->offset + prevsize == entry->offset)) &&
1544		     (prev->eflags == entry->eflags) &&
1545		     (prev->protection == entry->protection) &&
1546		     (prev->max_protection == entry->max_protection) &&
1547		     (prev->inheritance == entry->inheritance) &&
1548		     (prev->wired_count == entry->wired_count) &&
1549		     (prev->cred == entry->cred)) {
1550			vm_map_entry_unlink(map, prev);
1551			entry->start = prev->start;
1552			entry->offset = prev->offset;
1553			if (entry->prev != &map->header)
1554				vm_map_entry_resize_free(map, entry->prev);
1555
1556			/*
1557			 * If the backing object is a vnode object,
1558			 * vm_object_deallocate() calls vrele().
1559			 * However, vrele() does not lock the vnode
1560			 * because the vnode has additional
1561			 * references.  Thus, the map lock can be kept
1562			 * without causing a lock-order reversal with
1563			 * the vnode lock.
1564			 *
1565			 * Since we count the number of virtual page
1566			 * mappings in object->un_pager.vnp.writemappings,
1567			 * the writemappings value should not be adjusted
1568			 * when the entry is disposed of.
1569			 */
1570			if (prev->object.vm_object)
1571				vm_object_deallocate(prev->object.vm_object);
1572			if (prev->cred != NULL)
1573				crfree(prev->cred);
1574			vm_map_entry_dispose(map, prev);
1575		}
1576	}
1577
1578	next = entry->next;
1579	if (next != &map->header) {
1580		esize = entry->end - entry->start;
1581		if ((entry->end == next->start) &&
1582		    (next->object.vm_object == entry->object.vm_object) &&
1583		     (!entry->object.vm_object ||
1584			(entry->offset + esize == next->offset)) &&
1585		    (next->eflags == entry->eflags) &&
1586		    (next->protection == entry->protection) &&
1587		    (next->max_protection == entry->max_protection) &&
1588		    (next->inheritance == entry->inheritance) &&
1589		    (next->wired_count == entry->wired_count) &&
1590		    (next->cred == entry->cred)) {
1591			vm_map_entry_unlink(map, next);
1592			entry->end = next->end;
1593			vm_map_entry_resize_free(map, entry);
1594
1595			/*
1596			 * See comment above.
1597			 */
1598			if (next->object.vm_object)
1599				vm_object_deallocate(next->object.vm_object);
1600			if (next->cred != NULL)
1601				crfree(next->cred);
1602			vm_map_entry_dispose(map, next);
1603		}
1604	}
1605}
1606/*
1607 *	vm_map_clip_start:	[ internal use only ]
1608 *
1609 *	Asserts that the given entry begins at or after
1610 *	the specified address; if necessary,
1611 *	it splits the entry into two.
1612 */
1613#define vm_map_clip_start(map, entry, startaddr) \
1614{ \
1615	if (startaddr > entry->start) \
1616		_vm_map_clip_start(map, entry, startaddr); \
1617}
1618
1619/*
1620 *	This routine is called only when it is known that
1621 *	the entry must be split.
1622 */
1623static void
1624_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1625{
1626	vm_map_entry_t new_entry;
1627
1628	VM_MAP_ASSERT_LOCKED(map);
1629
1630	/*
1631	 * Split off the front portion -- note that we must insert the new
1632	 * entry BEFORE this one, so that this entry has the specified
1633	 * starting address.
1634	 */
1635	vm_map_simplify_entry(map, entry);
1636
1637	/*
1638	 * If there is no object backing this entry, we might as well create
1639	 * one now.  If we defer it, an object can get created after the map
1640	 * is clipped, and individual objects will be created for the split-up
1641	 * map.  This is a bit of a hack, but is also about the best place to
1642	 * put this improvement.
1643	 */
1644	if (entry->object.vm_object == NULL && !map->system_map) {
1645		vm_object_t object;
1646		object = vm_object_allocate(OBJT_DEFAULT,
1647				atop(entry->end - entry->start));
1648		entry->object.vm_object = object;
1649		entry->offset = 0;
1650		if (entry->cred != NULL) {
1651			object->cred = entry->cred;
1652			object->charge = entry->end - entry->start;
1653			entry->cred = NULL;
1654		}
1655	} else if (entry->object.vm_object != NULL &&
1656		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1657		   entry->cred != NULL) {
1658		VM_OBJECT_WLOCK(entry->object.vm_object);
1659		KASSERT(entry->object.vm_object->cred == NULL,
1660		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1661		entry->object.vm_object->cred = entry->cred;
1662		entry->object.vm_object->charge = entry->end - entry->start;
1663		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1664		entry->cred = NULL;
1665	}
1666
1667	new_entry = vm_map_entry_create(map);
1668	*new_entry = *entry;
1669
1670	new_entry->end = start;
1671	entry->offset += (start - entry->start);
1672	entry->start = start;
1673	if (new_entry->cred != NULL)
1674		crhold(entry->cred);
1675
1676	vm_map_entry_link(map, entry->prev, new_entry);
1677
1678	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1679		vm_object_reference(new_entry->object.vm_object);
1680		/*
1681		 * The object->un_pager.vnp.writemappings for the
1682		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1683		 * kept as is here.  The virtual pages are
1684		 * re-distributed among the clipped entries, so the sum is
1685		 * left the same.
1686		 */
1687	}
1688}
1689
1690/*
1691 *	vm_map_clip_end:	[ internal use only ]
1692 *
1693 *	Asserts that the given entry ends at or before
1694 *	the specified address; if necessary,
1695 *	it splits the entry into two.
1696 */
1697#define vm_map_clip_end(map, entry, endaddr) \
1698{ \
1699	if ((endaddr) < (entry->end)) \
1700		_vm_map_clip_end((map), (entry), (endaddr)); \
1701}
1702
1703/*
1704 *	This routine is called only when it is known that
1705 *	the entry must be split.
1706 */
1707static void
1708_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1709{
1710	vm_map_entry_t new_entry;
1711
1712	VM_MAP_ASSERT_LOCKED(map);
1713
1714	/*
1715	 * If there is no object backing this entry, we might as well create
1716	 * one now.  If we defer it, an object can get created after the map
1717	 * is clipped, and individual objects will be created for the split-up
1718	 * map.  This is a bit of a hack, but is also about the best place to
1719	 * put this improvement.
1720	 */
1721	if (entry->object.vm_object == NULL && !map->system_map) {
1722		vm_object_t object;
1723		object = vm_object_allocate(OBJT_DEFAULT,
1724				atop(entry->end - entry->start));
1725		entry->object.vm_object = object;
1726		entry->offset = 0;
1727		if (entry->cred != NULL) {
1728			object->cred = entry->cred;
1729			object->charge = entry->end - entry->start;
1730			entry->cred = NULL;
1731		}
1732	} else if (entry->object.vm_object != NULL &&
1733		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1734		   entry->cred != NULL) {
1735		VM_OBJECT_WLOCK(entry->object.vm_object);
1736		KASSERT(entry->object.vm_object->cred == NULL,
1737		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1738		entry->object.vm_object->cred = entry->cred;
1739		entry->object.vm_object->charge = entry->end - entry->start;
1740		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1741		entry->cred = NULL;
1742	}
1743
1744	/*
1745	 * Create a new entry and insert it AFTER the specified entry
1746	 */
1747	new_entry = vm_map_entry_create(map);
1748	*new_entry = *entry;
1749
1750	new_entry->start = entry->end = end;
1751	new_entry->offset += (end - entry->start);
1752	if (new_entry->cred != NULL)
1753		crhold(entry->cred);
1754
1755	vm_map_entry_link(map, entry, new_entry);
1756
1757	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1758		vm_object_reference(new_entry->object.vm_object);
1759	}
1760}
1761
1762/*
1763 *	vm_map_submap:		[ kernel use only ]
1764 *
1765 *	Mark the given range as handled by a subordinate map.
1766 *
1767 *	This range must have been created with vm_map_find,
1768 *	and no other operations may have been performed on this
1769 *	range prior to calling vm_map_submap.
1770 *
1771 *	Only a limited number of operations can be performed
1772 *	within this range after calling vm_map_submap:
1773 *		vm_fault
1774 *	[Don't try vm_map_copy!]
1775 *
1776 *	To remove a submapping, one must first remove the
1777 *	range from the superior map, and then destroy the
1778 *	submap (if desired).  [Better yet, don't try it.]
1779 */
1780int
1781vm_map_submap(
1782	vm_map_t map,
1783	vm_offset_t start,
1784	vm_offset_t end,
1785	vm_map_t submap)
1786{
1787	vm_map_entry_t entry;
1788	int result = KERN_INVALID_ARGUMENT;
1789
1790	vm_map_lock(map);
1791
1792	VM_MAP_RANGE_CHECK(map, start, end);
1793
1794	if (vm_map_lookup_entry(map, start, &entry)) {
1795		vm_map_clip_start(map, entry, start);
1796	} else
1797		entry = entry->next;
1798
1799	vm_map_clip_end(map, entry, end);
1800
1801	if ((entry->start == start) && (entry->end == end) &&
1802	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1803	    (entry->object.vm_object == NULL)) {
1804		entry->object.sub_map = submap;
1805		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1806		result = KERN_SUCCESS;
1807	}
1808	vm_map_unlock(map);
1809
1810	return (result);
1811}
1812
1813/*
1814 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1815 */
1816#define	MAX_INIT_PT	96
1817
1818/*
1819 *	vm_map_pmap_enter:
1820 *
1821 *	Preload the specified map's pmap with mappings to the specified
1822 *	object's memory-resident pages.  No further physical pages are
1823 *	allocated, and no further virtual pages are retrieved from secondary
1824 *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1825 *	limited number of page mappings are created at the low-end of the
1826 *	specified address range.  (For this purpose, a superpage mapping
1827 *	counts as one page mapping.)  Otherwise, all resident pages within
1828 *	the specified address range are mapped.  Because these mappings are
1829 *	being created speculatively, cached pages are not reactivated and
1830 *	mapped.
1831 */
1832void
1833vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1834    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1835{
1836	vm_offset_t start;
1837	vm_page_t p, p_start;
1838	vm_pindex_t mask, psize, threshold, tmpidx;
1839
1840	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1841		return;
1842	VM_OBJECT_RLOCK(object);
1843	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1844		VM_OBJECT_RUNLOCK(object);
1845		VM_OBJECT_WLOCK(object);
1846		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1847			pmap_object_init_pt(map->pmap, addr, object, pindex,
1848			    size);
1849			VM_OBJECT_WUNLOCK(object);
1850			return;
1851		}
1852		VM_OBJECT_LOCK_DOWNGRADE(object);
1853	}
1854
1855	psize = atop(size);
1856	if (psize + pindex > object->size) {
1857		if (object->size < pindex) {
1858			VM_OBJECT_RUNLOCK(object);
1859			return;
1860		}
1861		psize = object->size - pindex;
1862	}
1863
1864	start = 0;
1865	p_start = NULL;
1866	threshold = MAX_INIT_PT;
1867
1868	p = vm_page_find_least(object, pindex);
1869	/*
1870	 * Assert: the variable p is either (1) the page with the
1871	 * least pindex greater than or equal to the parameter pindex
1872	 * or (2) NULL.
1873	 */
1874	for (;
1875	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1876	     p = TAILQ_NEXT(p, listq)) {
1877		/*
1878		 * Don't allow madvise to exhaust our reserve of free
1879		 * pages by allocating pv entries.
1880		 */
1881		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1882		    cnt.v_free_count < cnt.v_free_reserved) ||
1883		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1884		    tmpidx >= threshold)) {
1885			psize = tmpidx;
1886			break;
1887		}
1888		if (p->valid == VM_PAGE_BITS_ALL) {
1889			if (p_start == NULL) {
1890				start = addr + ptoa(tmpidx);
1891				p_start = p;
1892			}
1893			/* Jump ahead if a superpage mapping is possible. */
1894			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1895			    (pagesizes[p->psind] - 1)) == 0) {
1896				mask = atop(pagesizes[p->psind]) - 1;
1897				if (tmpidx + mask < psize &&
1898				    vm_page_ps_is_valid(p)) {
1899					p += mask;
1900					threshold += mask;
1901				}
1902			}
1903		} else if (p_start != NULL) {
1904			pmap_enter_object(map->pmap, start, addr +
1905			    ptoa(tmpidx), p_start, prot);
1906			p_start = NULL;
1907		}
1908	}
1909	if (p_start != NULL)
1910		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1911		    p_start, prot);
1912	VM_OBJECT_RUNLOCK(object);
1913}
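
/*
 * Illustrative sketch, not part of this file: a caller typically passes
 * the entry's start address and protection together with the backing
 * object and the entry's offset converted to a page index ("map" and
 * "entry" are placeholders):
 *
 *	vm_map_pmap_enter(map, entry->start, entry->protection,
 *	    entry->object.vm_object, OFF_TO_IDX(entry->offset),
 *	    entry->end - entry->start, MAP_PREFAULT_PARTIAL);
 */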
1914
1915/*
1916 *	vm_map_protect:
1917 *
1918 *	Sets the protection of the specified address
1919 *	region in the target map.  If "set_max" is
1920 *	specified, the maximum protection is to be set;
1921 *	otherwise, only the current protection is affected.
1922 */
1923int
1924vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1925	       vm_prot_t new_prot, boolean_t set_max)
1926{
1927	vm_map_entry_t current, entry;
1928	vm_object_t obj;
1929	struct ucred *cred;
1930	vm_prot_t old_prot;
1931
1932	if (start == end)
1933		return (KERN_SUCCESS);
1934
1935	vm_map_lock(map);
1936
1937	VM_MAP_RANGE_CHECK(map, start, end);
1938
1939	if (vm_map_lookup_entry(map, start, &entry)) {
1940		vm_map_clip_start(map, entry, start);
1941	} else {
1942		entry = entry->next;
1943	}
1944
1945	/*
1946	 * Make a first pass to check for protection violations.
1947	 */
1948	current = entry;
1949	while ((current != &map->header) && (current->start < end)) {
1950		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1951			vm_map_unlock(map);
1952			return (KERN_INVALID_ARGUMENT);
1953		}
1954		if ((new_prot & current->max_protection) != new_prot) {
1955			vm_map_unlock(map);
1956			return (KERN_PROTECTION_FAILURE);
1957		}
1958		current = current->next;
1959	}
1960
1961
1962	/*
1963	 * Do an accounting pass for private read-only mappings that
1964	 * now will do copy-on-write due to the allowed write (e.g. a
1965	 * debugger sets a breakpoint on the text segment).
1966	 */
1967	for (current = entry; (current != &map->header) &&
1968	     (current->start < end); current = current->next) {
1969
1970		vm_map_clip_end(map, current, end);
1971
1972		if (set_max ||
1973		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1974		    ENTRY_CHARGED(current)) {
1975			continue;
1976		}
1977
1978		cred = curthread->td_ucred;
1979		obj = current->object.vm_object;
1980
1981		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1982			if (!swap_reserve(current->end - current->start)) {
1983				vm_map_unlock(map);
1984				return (KERN_RESOURCE_SHORTAGE);
1985			}
1986			crhold(cred);
1987			current->cred = cred;
1988			continue;
1989		}
1990
1991		VM_OBJECT_WLOCK(obj);
1992		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1993			VM_OBJECT_WUNLOCK(obj);
1994			continue;
1995		}
1996
1997		/*
1998		 * Charge for the whole object allocation now, since
1999		 * we cannot distinguish between non-charged and
2000		 * charged clipped mapping of the same object later.
2001		 */
2002		KASSERT(obj->charge == 0,
2003		    ("vm_map_protect: object %p overcharged (entry %p)",
2004		    obj, current));
2005		if (!swap_reserve(ptoa(obj->size))) {
2006			VM_OBJECT_WUNLOCK(obj);
2007			vm_map_unlock(map);
2008			return (KERN_RESOURCE_SHORTAGE);
2009		}
2010
2011		crhold(cred);
2012		obj->cred = cred;
2013		obj->charge = ptoa(obj->size);
2014		VM_OBJECT_WUNLOCK(obj);
2015	}
2016
2017	/*
2018	 * Go back and fix up protections. [Note that clipping is not
2019	 * necessary the second time.]
2020	 */
2021	current = entry;
2022	while ((current != &map->header) && (current->start < end)) {
2023		old_prot = current->protection;
2024
2025		if (set_max)
2026			current->protection =
2027			    (current->max_protection = new_prot) &
2028			    old_prot;
2029		else
2030			current->protection = new_prot;
2031
2032		/*
2033		 * For user wired map entries, the normal lazy evaluation of
2034		 * write access upgrades through soft page faults is
2035		 * undesirable.  Instead, immediately copy any pages that are
2036		 * copy-on-write and enable write access in the physical map.
2037		 */
2038		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2039		    (current->protection & VM_PROT_WRITE) != 0 &&
2040		    (old_prot & VM_PROT_WRITE) == 0)
2041			vm_fault_copy_entry(map, map, current, current, NULL);
2042
2043		/*
2044		 * When restricting access, update the physical map.  Worry
2045		 * about copy-on-write here.
2046		 */
2047		if ((old_prot & ~current->protection) != 0) {
2048#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2049							VM_PROT_ALL)
2050			pmap_protect(map->pmap, current->start,
2051			    current->end,
2052			    current->protection & MASK(current));
2053#undef	MASK
2054		}
2055		vm_map_simplify_entry(map, current);
2056		current = current->next;
2057	}
2058	vm_map_unlock(map);
2059	return (KERN_SUCCESS);
2060}
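
/*
 * Illustrative sketch, not part of this file: revoking write access from
 * a range while leaving the maximum protection untouched might look like
 * ("map", "start" and "end" are placeholders):
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * Passing TRUE for set_max instead lowers max_protection and clips the
 * current protection to the intersection of the new maximum and the old
 * current protection.
 */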
2061
2062/*
2063 *	vm_map_madvise:
2064 *
2065 *	This routine traverses a process's map handling the madvise
2066 *	system call.  Advisories are classified as either those affecting
2067 *	the vm_map_entry structure, or those affecting the underlying
2068 *	objects.
2069 */
2070int
2071vm_map_madvise(
2072	vm_map_t map,
2073	vm_offset_t start,
2074	vm_offset_t end,
2075	int behav)
2076{
2077	vm_map_entry_t current, entry;
2078	int modify_map = 0;
2079
2080	/*
2081	 * Some madvise calls directly modify the vm_map_entry, in which case
2082	 * we need to use an exclusive lock on the map and we need to perform
2083	 * various clipping operations.  Otherwise we only need a read-lock
2084	 * on the map.
2085	 */
2086	switch (behav) {
2087	case MADV_NORMAL:
2088	case MADV_SEQUENTIAL:
2089	case MADV_RANDOM:
2090	case MADV_NOSYNC:
2091	case MADV_AUTOSYNC:
2092	case MADV_NOCORE:
2093	case MADV_CORE:
2094		if (start == end)
2095			return (KERN_SUCCESS);
2096		modify_map = 1;
2097		vm_map_lock(map);
2098		break;
2099	case MADV_WILLNEED:
2100	case MADV_DONTNEED:
2101	case MADV_FREE:
2102		if (start == end)
2103			return (KERN_SUCCESS);
2104		vm_map_lock_read(map);
2105		break;
2106	default:
2107		return (KERN_INVALID_ARGUMENT);
2108	}
2109
2110	/*
2111	 * Locate starting entry and clip if necessary.
2112	 */
2113	VM_MAP_RANGE_CHECK(map, start, end);
2114
2115	if (vm_map_lookup_entry(map, start, &entry)) {
2116		if (modify_map)
2117			vm_map_clip_start(map, entry, start);
2118	} else {
2119		entry = entry->next;
2120	}
2121
2122	if (modify_map) {
2123		/*
2124		 * madvise behaviors that are implemented in the vm_map_entry.
2125		 *
2126		 * We clip the vm_map_entry so that behavioral changes are
2127		 * limited to the specified address range.
2128		 */
2129		for (current = entry;
2130		     (current != &map->header) && (current->start < end);
2131		     current = current->next
2132		) {
2133			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2134				continue;
2135
2136			vm_map_clip_end(map, current, end);
2137
2138			switch (behav) {
2139			case MADV_NORMAL:
2140				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2141				break;
2142			case MADV_SEQUENTIAL:
2143				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2144				break;
2145			case MADV_RANDOM:
2146				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2147				break;
2148			case MADV_NOSYNC:
2149				current->eflags |= MAP_ENTRY_NOSYNC;
2150				break;
2151			case MADV_AUTOSYNC:
2152				current->eflags &= ~MAP_ENTRY_NOSYNC;
2153				break;
2154			case MADV_NOCORE:
2155				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2156				break;
2157			case MADV_CORE:
2158				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2159				break;
2160			default:
2161				break;
2162			}
2163			vm_map_simplify_entry(map, current);
2164		}
2165		vm_map_unlock(map);
2166	} else {
2167		vm_pindex_t pstart, pend;
2168
2169		/*
2170		 * madvise behaviors that are implemented in the underlying
2171		 * vm_object.
2172		 *
2173		 * Since we don't clip the vm_map_entry, we have to clip
2174		 * the vm_object pindex and count.
2175		 */
2176		for (current = entry;
2177		     (current != &map->header) && (current->start < end);
2178		     current = current->next
2179		) {
2180			vm_offset_t useEnd, useStart;
2181
2182			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2183				continue;
2184
2185			pstart = OFF_TO_IDX(current->offset);
2186			pend = pstart + atop(current->end - current->start);
2187			useStart = current->start;
2188			useEnd = current->end;
2189
2190			if (current->start < start) {
2191				pstart += atop(start - current->start);
2192				useStart = start;
2193			}
2194			if (current->end > end) {
2195				pend -= atop(current->end - end);
2196				useEnd = end;
2197			}
2198
2199			if (pstart >= pend)
2200				continue;
2201
2202			/*
2203			 * Perform the pmap_advise() before clearing
2204			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2205			 * concurrent pmap operation, such as pmap_remove(),
2206			 * could clear a reference in the pmap and set
2207			 * PGA_REFERENCED on the page before the pmap_advise()
2208			 * had completed.  Consequently, the page would appear
2209			 * referenced based upon an old reference that
2210			 * occurred before this pmap_advise() ran.
2211			 */
2212			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2213				pmap_advise(map->pmap, useStart, useEnd,
2214				    behav);
2215
2216			vm_object_madvise(current->object.vm_object, pstart,
2217			    pend, behav);
2218
2219			/*
2220			 * Pre-populate paging structures in the
2221			 * WILLNEED case.  For wired entries, the
2222			 * paging structures are already populated.
2223			 */
2224			if (behav == MADV_WILLNEED &&
2225			    current->wired_count == 0) {
2226				vm_map_pmap_enter(map,
2227				    useStart,
2228				    current->protection,
2229				    current->object.vm_object,
2230				    pstart,
2231				    ptoa(pend - pstart),
2232				    MAP_PREFAULT_MADVISE
2233				);
2234			}
2235		}
2236		vm_map_unlock_read(map);
2237	}
2238	return (0);
2239}
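
/*
 * Illustrative sketch, not part of this file: the madvise(2) path would
 * end up calling this routine on the current process map, roughly as in
 * ("addr" and "len" are placeholders for the user-supplied range):
 *
 *	rv = vm_map_madvise(&curproc->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len), MADV_WILLNEED);
 */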
2240
2241
2242/*
2243 *	vm_map_inherit:
2244 *
2245 *	Sets the inheritance of the specified address
2246 *	range in the target map.  Inheritance
2247 *	affects how the map will be shared with
2248 *	child maps at the time of vmspace_fork.
2249 */
2250int
2251vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2252	       vm_inherit_t new_inheritance)
2253{
2254	vm_map_entry_t entry;
2255	vm_map_entry_t temp_entry;
2256
2257	switch (new_inheritance) {
2258	case VM_INHERIT_NONE:
2259	case VM_INHERIT_COPY:
2260	case VM_INHERIT_SHARE:
2261		break;
2262	default:
2263		return (KERN_INVALID_ARGUMENT);
2264	}
2265	if (start == end)
2266		return (KERN_SUCCESS);
2267	vm_map_lock(map);
2268	VM_MAP_RANGE_CHECK(map, start, end);
2269	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2270		entry = temp_entry;
2271		vm_map_clip_start(map, entry, start);
2272	} else
2273		entry = temp_entry->next;
2274	while ((entry != &map->header) && (entry->start < end)) {
2275		vm_map_clip_end(map, entry, end);
2276		entry->inheritance = new_inheritance;
2277		vm_map_simplify_entry(map, entry);
2278		entry = entry->next;
2279	}
2280	vm_map_unlock(map);
2281	return (KERN_SUCCESS);
2282}
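
/*
 * Illustrative sketch, not part of this file: marking a range so that it
 * is shared with children created by a later vmspace_fork():
 *
 *	rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 */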
2283
2284/*
2285 *	vm_map_unwire:
2286 *
2287 *	Implements both kernel and user unwiring.
2288 */
2289int
2290vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2291    int flags)
2292{
2293	vm_map_entry_t entry, first_entry, tmp_entry;
2294	vm_offset_t saved_start;
2295	unsigned int last_timestamp;
2296	int rv;
2297	boolean_t need_wakeup, result, user_unwire;
2298
2299	if (start == end)
2300		return (KERN_SUCCESS);
2301	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2302	vm_map_lock(map);
2303	VM_MAP_RANGE_CHECK(map, start, end);
2304	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2305		if (flags & VM_MAP_WIRE_HOLESOK)
2306			first_entry = first_entry->next;
2307		else {
2308			vm_map_unlock(map);
2309			return (KERN_INVALID_ADDRESS);
2310		}
2311	}
2312	last_timestamp = map->timestamp;
2313	entry = first_entry;
2314	while (entry != &map->header && entry->start < end) {
2315		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2316			/*
2317			 * We have not yet clipped the entry.
2318			 */
2319			saved_start = (start >= entry->start) ? start :
2320			    entry->start;
2321			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2322			if (vm_map_unlock_and_wait(map, 0)) {
2323				/*
2324				 * Allow interruption of user unwiring?
2325				 */
2326			}
2327			vm_map_lock(map);
2328			if (last_timestamp+1 != map->timestamp) {
2329				/*
2330				 * Look again for the entry because the map was
2331				 * modified while it was unlocked.
2332				 * Specifically, the entry may have been
2333				 * clipped, merged, or deleted.
2334				 */
2335				if (!vm_map_lookup_entry(map, saved_start,
2336				    &tmp_entry)) {
2337					if (flags & VM_MAP_WIRE_HOLESOK)
2338						tmp_entry = tmp_entry->next;
2339					else {
2340						if (saved_start == start) {
2341							/*
2342							 * first_entry has been deleted.
2343							 */
2344							vm_map_unlock(map);
2345							return (KERN_INVALID_ADDRESS);
2346						}
2347						end = saved_start;
2348						rv = KERN_INVALID_ADDRESS;
2349						goto done;
2350					}
2351				}
2352				if (entry == first_entry)
2353					first_entry = tmp_entry;
2354				else
2355					first_entry = NULL;
2356				entry = tmp_entry;
2357			}
2358			last_timestamp = map->timestamp;
2359			continue;
2360		}
2361		vm_map_clip_start(map, entry, start);
2362		vm_map_clip_end(map, entry, end);
2363		/*
2364		 * Mark the entry in case the map lock is released.  (See
2365		 * above.)
2366		 */
2367		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2368		    entry->wiring_thread == NULL,
2369		    ("owned map entry %p", entry));
2370		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2371		entry->wiring_thread = curthread;
2372		/*
2373		 * Check the map for holes in the specified region.
2374		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2375		 */
2376		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2377		    (entry->end < end && (entry->next == &map->header ||
2378		    entry->next->start > entry->end))) {
2379			end = entry->end;
2380			rv = KERN_INVALID_ADDRESS;
2381			goto done;
2382		}
2383		/*
2384		 * If system unwiring, require that the entry is system wired.
2385		 */
2386		if (!user_unwire &&
2387		    vm_map_entry_system_wired_count(entry) == 0) {
2388			end = entry->end;
2389			rv = KERN_INVALID_ARGUMENT;
2390			goto done;
2391		}
2392		entry = entry->next;
2393	}
2394	rv = KERN_SUCCESS;
2395done:
2396	need_wakeup = FALSE;
2397	if (first_entry == NULL) {
2398		result = vm_map_lookup_entry(map, start, &first_entry);
2399		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2400			first_entry = first_entry->next;
2401		else
2402			KASSERT(result, ("vm_map_unwire: lookup failed"));
2403	}
2404	for (entry = first_entry; entry != &map->header && entry->start < end;
2405	    entry = entry->next) {
2406		/*
2407		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2408		 * space in the unwired region could have been mapped
2409		 * while the map lock was dropped for draining
2410		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2411		 * could be simultaneously wiring this new mapping
2412		 * entry.  Detect these cases and skip any entries
2413		 * marked as in transition by us.
2414		 */
2415		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2416		    entry->wiring_thread != curthread) {
2417			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2418			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2419			continue;
2420		}
2421
2422		if (rv == KERN_SUCCESS && (!user_unwire ||
2423		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2424			if (user_unwire)
2425				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2426			if (entry->wired_count == 1)
2427				vm_map_entry_unwire(map, entry);
2428			else
2429				entry->wired_count--;
2430		}
2431		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2432		    ("vm_map_unwire: in-transition flag missing %p", entry));
2433		KASSERT(entry->wiring_thread == curthread,
2434		    ("vm_map_unwire: alien wire %p", entry));
2435		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2436		entry->wiring_thread = NULL;
2437		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2438			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2439			need_wakeup = TRUE;
2440		}
2441		vm_map_simplify_entry(map, entry);
2442	}
2443	vm_map_unlock(map);
2444	if (need_wakeup)
2445		vm_map_wakeup(map);
2446	return (rv);
2447}
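
/*
 * Illustrative sketch, not part of this file: a munlock(2)-style caller
 * would request a user unwire, tolerating holes only if the range was
 * originally wired with holes allowed:
 *
 *	rv = vm_map_unwire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
 */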
2448
2449/*
2450 *	vm_map_wire_entry_failure:
2451 *
2452 *	Handle a wiring failure on the given entry.
2453 *
2454 *	The map should be locked.
2455 */
2456static void
2457vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2458    vm_offset_t failed_addr)
2459{
2460
2461	VM_MAP_ASSERT_LOCKED(map);
2462	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2463	    entry->wired_count == 1,
2464	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2465	KASSERT(failed_addr < entry->end,
2466	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2467
2468	/*
2469	 * If any pages at the start of this entry were successfully wired,
2470	 * then unwire them.
2471	 */
2472	if (failed_addr > entry->start) {
2473		pmap_unwire(map->pmap, entry->start, failed_addr);
2474		vm_object_unwire(entry->object.vm_object, entry->offset,
2475		    failed_addr - entry->start, PQ_ACTIVE);
2476	}
2477
2478	/*
2479	 * Assign an out-of-range value to represent the failure to wire this
2480	 * entry.
2481	 */
2482	entry->wired_count = -1;
2483}
2484
2485/*
2486 *	vm_map_wire:
2487 *
2488 *	Implements both kernel and user wiring.
2489 */
2490int
2491vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2492    int flags)
2493{
2494	vm_map_entry_t entry, first_entry, tmp_entry;
2495	vm_offset_t faddr, saved_end, saved_start;
2496	unsigned int last_timestamp;
2497	int rv;
2498	boolean_t need_wakeup, result, user_wire;
2499	vm_prot_t prot;
2500
2501	if (start == end)
2502		return (KERN_SUCCESS);
2503	prot = 0;
2504	if (flags & VM_MAP_WIRE_WRITE)
2505		prot |= VM_PROT_WRITE;
2506	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2507	vm_map_lock(map);
2508	VM_MAP_RANGE_CHECK(map, start, end);
2509	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2510		if (flags & VM_MAP_WIRE_HOLESOK)
2511			first_entry = first_entry->next;
2512		else {
2513			vm_map_unlock(map);
2514			return (KERN_INVALID_ADDRESS);
2515		}
2516	}
2517	last_timestamp = map->timestamp;
2518	entry = first_entry;
2519	while (entry != &map->header && entry->start < end) {
2520		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2521			/*
2522			 * We have not yet clipped the entry.
2523			 */
2524			saved_start = (start >= entry->start) ? start :
2525			    entry->start;
2526			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2527			if (vm_map_unlock_and_wait(map, 0)) {
2528				/*
2529				 * Allow interruption of user wiring?
2530				 */
2531			}
2532			vm_map_lock(map);
2533			if (last_timestamp + 1 != map->timestamp) {
2534				/*
2535				 * Look again for the entry because the map was
2536				 * modified while it was unlocked.
2537				 * Specifically, the entry may have been
2538				 * clipped, merged, or deleted.
2539				 */
2540				if (!vm_map_lookup_entry(map, saved_start,
2541				    &tmp_entry)) {
2542					if (flags & VM_MAP_WIRE_HOLESOK)
2543						tmp_entry = tmp_entry->next;
2544					else {
2545						if (saved_start == start) {
2546							/*
2547							 * first_entry has been deleted.
2548							 */
2549							vm_map_unlock(map);
2550							return (KERN_INVALID_ADDRESS);
2551						}
2552						end = saved_start;
2553						rv = KERN_INVALID_ADDRESS;
2554						goto done;
2555					}
2556				}
2557				if (entry == first_entry)
2558					first_entry = tmp_entry;
2559				else
2560					first_entry = NULL;
2561				entry = tmp_entry;
2562			}
2563			last_timestamp = map->timestamp;
2564			continue;
2565		}
2566		vm_map_clip_start(map, entry, start);
2567		vm_map_clip_end(map, entry, end);
2568		/*
2569		 * Mark the entry in case the map lock is released.  (See
2570		 * above.)
2571		 */
2572		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2573		    entry->wiring_thread == NULL,
2574		    ("owned map entry %p", entry));
2575		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2576		entry->wiring_thread = curthread;
2577		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2578		    || (entry->protection & prot) != prot) {
2579			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2580			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2581				end = entry->end;
2582				rv = KERN_INVALID_ADDRESS;
2583				goto done;
2584			}
2585			goto next_entry;
2586		}
2587		if (entry->wired_count == 0) {
2588			entry->wired_count++;
2589			saved_start = entry->start;
2590			saved_end = entry->end;
2591
2592			/*
2593			 * Release the map lock, relying on the in-transition
2594			 * mark.  Mark the map busy for fork.
2595			 */
2596			vm_map_busy(map);
2597			vm_map_unlock(map);
2598
2599			faddr = saved_start;
2600			do {
2601				/*
2602				 * Simulate a fault to get the page and enter
2603				 * it into the physical map.
2604				 */
2605				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2606				    VM_FAULT_WIRE)) != KERN_SUCCESS)
2607					break;
2608			} while ((faddr += PAGE_SIZE) < saved_end);
2609			vm_map_lock(map);
2610			vm_map_unbusy(map);
2611			if (last_timestamp + 1 != map->timestamp) {
2612				/*
2613				 * Look again for the entry because the map was
2614				 * modified while it was unlocked.  The entry
2615				 * may have been clipped, but NOT merged or
2616				 * deleted.
2617				 */
2618				result = vm_map_lookup_entry(map, saved_start,
2619				    &tmp_entry);
2620				KASSERT(result, ("vm_map_wire: lookup failed"));
2621				if (entry == first_entry)
2622					first_entry = tmp_entry;
2623				else
2624					first_entry = NULL;
2625				entry = tmp_entry;
2626				while (entry->end < saved_end) {
2627					/*
2628					 * In case of failure, handle entries
2629					 * that were not fully wired here;
2630					 * fully wired entries are handled
2631					 * later.
2632					 */
2633					if (rv != KERN_SUCCESS &&
2634					    faddr < entry->end)
2635						vm_map_wire_entry_failure(map,
2636						    entry, faddr);
2637					entry = entry->next;
2638				}
2639			}
2640			last_timestamp = map->timestamp;
2641			if (rv != KERN_SUCCESS) {
2642				vm_map_wire_entry_failure(map, entry, faddr);
2643				end = entry->end;
2644				goto done;
2645			}
2646		} else if (!user_wire ||
2647			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2648			entry->wired_count++;
2649		}
2650		/*
2651		 * Check the map for holes in the specified region.
2652		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2653		 */
2654	next_entry:
2655		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2656		    (entry->end < end && (entry->next == &map->header ||
2657		    entry->next->start > entry->end))) {
2658			end = entry->end;
2659			rv = KERN_INVALID_ADDRESS;
2660			goto done;
2661		}
2662		entry = entry->next;
2663	}
2664	rv = KERN_SUCCESS;
2665done:
2666	need_wakeup = FALSE;
2667	if (first_entry == NULL) {
2668		result = vm_map_lookup_entry(map, start, &first_entry);
2669		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2670			first_entry = first_entry->next;
2671		else
2672			KASSERT(result, ("vm_map_wire: lookup failed"));
2673	}
2674	for (entry = first_entry; entry != &map->header && entry->start < end;
2675	    entry = entry->next) {
2676		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2677			goto next_entry_done;
2678
2679		/*
2680		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2681		 * space in the unwired region could have been mapped
2682		 * while the map lock was dropped for faulting in the
2683		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2684		 * Moreover, another thread could be simultaneously
2685		 * wiring this new mapping entry.  Detect these cases
2686		 * and skip any entries marked as in transition by us.
2687		 */
2688		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2689		    entry->wiring_thread != curthread) {
2690			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2691			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2692			continue;
2693		}
2694
2695		if (rv == KERN_SUCCESS) {
2696			if (user_wire)
2697				entry->eflags |= MAP_ENTRY_USER_WIRED;
2698		} else if (entry->wired_count == -1) {
2699			/*
2700			 * Wiring failed on this entry.  Thus, unwiring is
2701			 * unnecessary.
2702			 */
2703			entry->wired_count = 0;
2704		} else if (!user_wire ||
2705		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2706			/*
2707			 * Undo the wiring.  Wiring succeeded on this entry
2708			 * but failed on a later entry.
2709			 */
2710			if (entry->wired_count == 1)
2711				vm_map_entry_unwire(map, entry);
2712			else
2713				entry->wired_count--;
2714		}
2715	next_entry_done:
2716		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2717		    ("vm_map_wire: in-transition flag missing %p", entry));
2718		KASSERT(entry->wiring_thread == curthread,
2719		    ("vm_map_wire: alien wire %p", entry));
2720		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2721		    MAP_ENTRY_WIRE_SKIPPED);
2722		entry->wiring_thread = NULL;
2723		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2724			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2725			need_wakeup = TRUE;
2726		}
2727		vm_map_simplify_entry(map, entry);
2728	}
2729	vm_map_unlock(map);
2730	if (need_wakeup)
2731		vm_map_wakeup(map);
2732	return (rv);
2733}
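
/*
 * Illustrative sketch, not part of this file: wiring a user range and
 * additionally requiring that the pages be made writeable while wired:
 *
 *	rv = vm_map_wire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_WRITE | VM_MAP_WIRE_HOLESOK);
 */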
2734
2735/*
2736 * vm_map_sync
2737 *
2738 * Push any dirty cached pages in the address range to their pager.
2739 * If syncio is TRUE, dirty pages are written synchronously.
2740 * If invalidate is TRUE, any cached pages are freed as well.
2741 *
2742 * If the size of the region from start to end is zero, we are
2743 * supposed to flush all modified pages within the region containing
2744 * start.  Unfortunately, a region can be split or coalesced with
2745 * neighboring regions, making it difficult to determine what the
2746 * original region was.  Therefore, we approximate this requirement by
2747 * flushing the current region containing start.
2748 *
2749 * Returns an error if any part of the specified range is not mapped.
2750 */
2751int
2752vm_map_sync(
2753	vm_map_t map,
2754	vm_offset_t start,
2755	vm_offset_t end,
2756	boolean_t syncio,
2757	boolean_t invalidate)
2758{
2759	vm_map_entry_t current;
2760	vm_map_entry_t entry;
2761	vm_size_t size;
2762	vm_object_t object;
2763	vm_ooffset_t offset;
2764	unsigned int last_timestamp;
2765	boolean_t failed;
2766
2767	vm_map_lock_read(map);
2768	VM_MAP_RANGE_CHECK(map, start, end);
2769	if (!vm_map_lookup_entry(map, start, &entry)) {
2770		vm_map_unlock_read(map);
2771		return (KERN_INVALID_ADDRESS);
2772	} else if (start == end) {
2773		start = entry->start;
2774		end = entry->end;
2775	}
2776	/*
2777	 * Make a first pass to check for user-wired memory and holes.
2778	 */
2779	for (current = entry; current != &map->header && current->start < end;
2780	    current = current->next) {
2781		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2782			vm_map_unlock_read(map);
2783			return (KERN_INVALID_ARGUMENT);
2784		}
2785		if (end > current->end &&
2786		    (current->next == &map->header ||
2787			current->end != current->next->start)) {
2788			vm_map_unlock_read(map);
2789			return (KERN_INVALID_ADDRESS);
2790		}
2791	}
2792
2793	if (invalidate)
2794		pmap_remove(map->pmap, start, end);
2795	failed = FALSE;
2796
2797	/*
2798	 * Make a second pass, cleaning/uncaching pages from the indicated
2799	 * objects as we go.
2800	 */
2801	for (current = entry; current != &map->header && current->start < end;) {
2802		offset = current->offset + (start - current->start);
2803		size = (end <= current->end ? end : current->end) - start;
2804		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2805			vm_map_t smap;
2806			vm_map_entry_t tentry;
2807			vm_size_t tsize;
2808
2809			smap = current->object.sub_map;
2810			vm_map_lock_read(smap);
2811			(void) vm_map_lookup_entry(smap, offset, &tentry);
2812			tsize = tentry->end - offset;
2813			if (tsize < size)
2814				size = tsize;
2815			object = tentry->object.vm_object;
2816			offset = tentry->offset + (offset - tentry->start);
2817			vm_map_unlock_read(smap);
2818		} else {
2819			object = current->object.vm_object;
2820		}
2821		vm_object_reference(object);
2822		last_timestamp = map->timestamp;
2823		vm_map_unlock_read(map);
2824		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2825			failed = TRUE;
2826		start += size;
2827		vm_object_deallocate(object);
2828		vm_map_lock_read(map);
2829		if (last_timestamp == map->timestamp ||
2830		    !vm_map_lookup_entry(map, start, &current))
2831			current = current->next;
2832	}
2833
2834	vm_map_unlock_read(map);
2835	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2836}
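
/*
 * Illustrative sketch, not part of this file: an msync(2)-style caller
 * would map MS_SYNC to syncio and MS_INVALIDATE to invalidate ("addr",
 * "size" and "flags" are placeholders):
 *
 *	rv = vm_map_sync(map, addr, addr + size,
 *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
 */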
2837
2838/*
2839 *	vm_map_entry_unwire:	[ internal use only ]
2840 *
2841 *	Make the region specified by this entry pageable.
2842 *
2843 *	The map in question should be locked.
2844 *	[This is the reason for this routine's existence.]
2845 */
2846static void
2847vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2848{
2849
2850	VM_MAP_ASSERT_LOCKED(map);
2851	KASSERT(entry->wired_count > 0,
2852	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
2853	pmap_unwire(map->pmap, entry->start, entry->end);
2854	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2855	    entry->start, PQ_ACTIVE);
2856	entry->wired_count = 0;
2857}
2858
2859static void
2860vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2861{
2862
2863	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2864		vm_object_deallocate(entry->object.vm_object);
2865	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2866}
2867
2868/*
2869 *	vm_map_entry_delete:	[ internal use only ]
2870 *
2871 *	Deallocate the given entry from the target map.
2872 */
2873static void
2874vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2875{
2876	vm_object_t object;
2877	vm_pindex_t offidxstart, offidxend, count, size1;
2878	vm_ooffset_t size;
2879
2880	vm_map_entry_unlink(map, entry);
2881	object = entry->object.vm_object;
2882	size = entry->end - entry->start;
2883	map->size -= size;
2884
2885	if (entry->cred != NULL) {
2886		swap_release_by_cred(size, entry->cred);
2887		crfree(entry->cred);
2888	}
2889
2890	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2891	    (object != NULL)) {
2892		KASSERT(entry->cred == NULL || object->cred == NULL ||
2893		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2894		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2895		count = OFF_TO_IDX(size);
2896		offidxstart = OFF_TO_IDX(entry->offset);
2897		offidxend = offidxstart + count;
2898		VM_OBJECT_WLOCK(object);
2899		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
2900		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2901		    object == kernel_object || object == kmem_object)) {
2902			vm_object_collapse(object);
2903
2904			/*
2905			 * The option OBJPR_NOTMAPPED can be passed here
2906			 * because vm_map_delete() already performed
2907			 * pmap_remove() on the only mapping to this range
2908			 * of pages.
2909			 */
2910			vm_object_page_remove(object, offidxstart, offidxend,
2911			    OBJPR_NOTMAPPED);
2912			if (object->type == OBJT_SWAP)
2913				swap_pager_freespace(object, offidxstart,
2914				    count);
2915			if (offidxend >= object->size &&
2916			    offidxstart < object->size) {
2917				size1 = object->size;
2918				object->size = offidxstart;
2919				if (object->cred != NULL) {
2920					size1 -= object->size;
2921					KASSERT(object->charge >= ptoa(size1),
2922					    ("object %p charge < 0", object));
2923					swap_release_by_cred(ptoa(size1),
2924					    object->cred);
2925					object->charge -= ptoa(size1);
2926				}
2927			}
2928		}
2929		VM_OBJECT_WUNLOCK(object);
2930	} else
2931		entry->object.vm_object = NULL;
2932	if (map->system_map)
2933		vm_map_entry_deallocate(entry, TRUE);
2934	else {
2935		entry->next = curthread->td_map_def_user;
2936		curthread->td_map_def_user = entry;
2937	}
2938}
2939
2940/*
2941 *	vm_map_delete:	[ internal use only ]
2942 *
2943 *	Deallocates the given address range from the target
2944 *	map.
2945 */
2946int
2947vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2948{
2949	vm_map_entry_t entry;
2950	vm_map_entry_t first_entry;
2951
2952	VM_MAP_ASSERT_LOCKED(map);
2953	if (start == end)
2954		return (KERN_SUCCESS);
2955
2956	/*
2957	 * Find the start of the region, and clip it
2958	 */
2959	if (!vm_map_lookup_entry(map, start, &first_entry))
2960		entry = first_entry->next;
2961	else {
2962		entry = first_entry;
2963		vm_map_clip_start(map, entry, start);
2964	}
2965
2966	/*
2967	 * Step through all entries in this region
2968	 */
2969	while ((entry != &map->header) && (entry->start < end)) {
2970		vm_map_entry_t next;
2971
2972		/*
2973		 * Wait for wiring or unwiring of an entry to complete.
2974		 * Also wait for any system wirings to disappear on
2975		 * user maps.
2976		 */
2977		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2978		    (vm_map_pmap(map) != kernel_pmap &&
2979		    vm_map_entry_system_wired_count(entry) != 0)) {
2980			unsigned int last_timestamp;
2981			vm_offset_t saved_start;
2982			vm_map_entry_t tmp_entry;
2983
2984			saved_start = entry->start;
2985			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2986			last_timestamp = map->timestamp;
2987			(void) vm_map_unlock_and_wait(map, 0);
2988			vm_map_lock(map);
2989			if (last_timestamp + 1 != map->timestamp) {
2990				/*
2991				 * Look again for the entry because the map was
2992				 * modified while it was unlocked.
2993				 * Specifically, the entry may have been
2994				 * clipped, merged, or deleted.
2995				 */
2996				if (!vm_map_lookup_entry(map, saved_start,
2997							 &tmp_entry))
2998					entry = tmp_entry->next;
2999				else {
3000					entry = tmp_entry;
3001					vm_map_clip_start(map, entry,
3002							  saved_start);
3003				}
3004			}
3005			continue;
3006		}
3007		vm_map_clip_end(map, entry, end);
3008
3009		next = entry->next;
3010
3011		/*
3012		 * Unwire before removing addresses from the pmap; otherwise,
3013		 * unwiring will put the entries back in the pmap.
3014		 */
3015		if (entry->wired_count != 0) {
3016			vm_map_entry_unwire(map, entry);
3017		}
3018
3019		pmap_remove(map->pmap, entry->start, entry->end);
3020
3021		/*
3022		 * Delete the entry only after removing all pmap
3023		 * entries pointing to its pages.  (Otherwise, its
3024		 * page frames may be reallocated, and any modify bits
3025		 * will be set in the wrong object!)
3026		 */
3027		vm_map_entry_delete(map, entry);
3028		entry = next;
3029	}
3030	return (KERN_SUCCESS);
3031}
3032
3033/*
3034 *	vm_map_remove:
3035 *
3036 *	Remove the given address range from the target map.
3037 *	This is the exported form of vm_map_delete.
3038 */
3039int
3040vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3041{
3042	int result;
3043
3044	vm_map_lock(map);
3045	VM_MAP_RANGE_CHECK(map, start, end);
3046	result = vm_map_delete(map, start, end);
3047	vm_map_unlock(map);
3048	return (result);
3049}
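
/*
 * Illustrative sketch, not part of this file: unmapping a page-aligned
 * range, as a munmap(2)-style caller would:
 *
 *	(void)vm_map_remove(map, trunc_page(addr),
 *	    round_page(addr + size));
 */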
3050
3051/*
3052 *	vm_map_check_protection:
3053 *
3054 *	Assert that the target map allows the specified privilege on the
3055 *	entire address region given.  The entire region must be allocated.
3056 *
3057 *	WARNING!  This code does not and should not check whether the
3058 *	contents of the region are accessible.  For example, a smaller file
3059 *	might be mapped into a larger address space.
3060 *
3061 *	NOTE!  This code is also called by munmap().
3062 *
3063 *	The map must be locked.  A read lock is sufficient.
3064 */
3065boolean_t
3066vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3067			vm_prot_t protection)
3068{
3069	vm_map_entry_t entry;
3070	vm_map_entry_t tmp_entry;
3071
3072	if (!vm_map_lookup_entry(map, start, &tmp_entry))
3073		return (FALSE);
3074	entry = tmp_entry;
3075
3076	while (start < end) {
3077		if (entry == &map->header)
3078			return (FALSE);
3079		/*
3080		 * No holes allowed!
3081		 */
3082		if (start < entry->start)
3083			return (FALSE);
3084		/*
3085		 * Check protection associated with entry.
3086		 */
3087		if ((entry->protection & protection) != protection)
3088			return (FALSE);
3089		/* go to next entry */
3090		start = entry->end;
3091		entry = entry->next;
3092	}
3093	return (TRUE);
3094}
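
/*
 * Illustrative sketch, not part of this file: the caller must already
 * hold at least a read lock on the map, e.g.:
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);
 */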
3095
3096/*
3097 *	vm_map_copy_entry:
3098 *
3099 *	Copies the contents of the source entry to the destination
3100 *	entry.  The entries *must* be aligned properly.
3101 */
3102static void
3103vm_map_copy_entry(
3104	vm_map_t src_map,
3105	vm_map_t dst_map,
3106	vm_map_entry_t src_entry,
3107	vm_map_entry_t dst_entry,
3108	vm_ooffset_t *fork_charge)
3109{
3110	vm_object_t src_object;
3111	vm_map_entry_t fake_entry;
3112	vm_offset_t size;
3113	struct ucred *cred;
3114	int charged;
3115
3116	VM_MAP_ASSERT_LOCKED(dst_map);
3117
3118	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3119		return;
3120
3121	if (src_entry->wired_count == 0 ||
3122	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3123		/*
3124		 * If the source entry is marked needs_copy, it is already
3125		 * write-protected.
3126		 */
3127		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3128		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3129			pmap_protect(src_map->pmap,
3130			    src_entry->start,
3131			    src_entry->end,
3132			    src_entry->protection & ~VM_PROT_WRITE);
3133		}
3134
3135		/*
3136		 * Make a copy of the object.
3137		 */
3138		size = src_entry->end - src_entry->start;
3139		if ((src_object = src_entry->object.vm_object) != NULL) {
3140			VM_OBJECT_WLOCK(src_object);
3141			charged = ENTRY_CHARGED(src_entry);
3142			if (src_object->handle == NULL &&
3143			    (src_object->type == OBJT_DEFAULT ||
3144			    src_object->type == OBJT_SWAP)) {
3145				vm_object_collapse(src_object);
3146				if ((src_object->flags & (OBJ_NOSPLIT |
3147				    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3148					vm_object_split(src_entry);
3149					src_object =
3150					    src_entry->object.vm_object;
3151				}
3152			}
3153			vm_object_reference_locked(src_object);
3154			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3155			if (src_entry->cred != NULL &&
3156			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3157				KASSERT(src_object->cred == NULL,
3158				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3159				     src_object));
3160				src_object->cred = src_entry->cred;
3161				src_object->charge = size;
3162			}
3163			VM_OBJECT_WUNLOCK(src_object);
3164			dst_entry->object.vm_object = src_object;
3165			if (charged) {
3166				cred = curthread->td_ucred;
3167				crhold(cred);
3168				dst_entry->cred = cred;
3169				*fork_charge += size;
3170				if (!(src_entry->eflags &
3171				      MAP_ENTRY_NEEDS_COPY)) {
3172					crhold(cred);
3173					src_entry->cred = cred;
3174					*fork_charge += size;
3175				}
3176			}
3177			src_entry->eflags |= MAP_ENTRY_COW |
3178			    MAP_ENTRY_NEEDS_COPY;
3179			dst_entry->eflags |= MAP_ENTRY_COW |
3180			    MAP_ENTRY_NEEDS_COPY;
3181			dst_entry->offset = src_entry->offset;
3182			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3183				/*
3184				 * MAP_ENTRY_VN_WRITECNT cannot
3185				 * indicate write reference from
3186				 * src_entry, since the entry is
3187				 * marked as needs copy.  Allocate a
3188				 * fake entry that is used to
3189				 * decrement object->un_pager.vnp.writecount
3190				 * at the appropriate time.  Attach
3191				 * fake_entry to the deferred list.
3192				 */
3193				fake_entry = vm_map_entry_create(dst_map);
3194				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3195				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3196				vm_object_reference(src_object);
3197				fake_entry->object.vm_object = src_object;
3198				fake_entry->start = src_entry->start;
3199				fake_entry->end = src_entry->end;
3200				fake_entry->next = curthread->td_map_def_user;
3201				curthread->td_map_def_user = fake_entry;
3202			}
3203		} else {
3204			dst_entry->object.vm_object = NULL;
3205			dst_entry->offset = 0;
3206			if (src_entry->cred != NULL) {
3207				dst_entry->cred = curthread->td_ucred;
3208				crhold(dst_entry->cred);
3209				*fork_charge += size;
3210			}
3211		}
3212
3213		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3214		    dst_entry->end - dst_entry->start, src_entry->start);
3215	} else {
3216		/*
3217		 * We don't want to make writeable wired pages copy-on-write.
3218		 * Immediately copy these pages into the new map by simulating
3219		 * page faults.  The new pages are pageable.
3220		 */
3221		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3222		    fork_charge);
3223	}
3224}
3225
3226/*
3227 * vmspace_map_entry_forked:
3228 * Update the newly-forked vmspace each time a map entry is inherited
3229 * or copied.  The values for vm_dsize and vm_tsize are approximate
3230 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3231 */
3232static void
3233vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3234    vm_map_entry_t entry)
3235{
3236	vm_size_t entrysize;
3237	vm_offset_t newend;
3238
3239	entrysize = entry->end - entry->start;
3240	vm2->vm_map.size += entrysize;
3241	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3242		vm2->vm_ssize += btoc(entrysize);
3243	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3244	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3245		newend = MIN(entry->end,
3246		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3247		vm2->vm_dsize += btoc(newend - entry->start);
3248	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3249	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3250		newend = MIN(entry->end,
3251		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3252		vm2->vm_tsize += btoc(newend - entry->start);
3253	}
3254}
3255
3256/*
3257 * vmspace_fork:
3258 * Create a new process vmspace structure and vm_map
3259 * based on those of an existing process.  The new map
3260 * is based on the old map, according to the inheritance
3261 * values on the regions in that map.
3262 *
3263 * XXX It might be worth coalescing the entries added to the new vmspace.
3264 *
3265 * The source map must not be locked.
3266 */
3267struct vmspace *
3268vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3269{
3270	struct vmspace *vm2;
3271	vm_map_t new_map, old_map;
3272	vm_map_entry_t new_entry, old_entry;
3273	vm_object_t object;
3274	int locked;
3275
3276	old_map = &vm1->vm_map;
3277	/* Copy immutable fields of vm1 to vm2. */
3278	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3279	if (vm2 == NULL)
3280		return (NULL);
3281	vm2->vm_taddr = vm1->vm_taddr;
3282	vm2->vm_daddr = vm1->vm_daddr;
3283	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3284	vm_map_lock(old_map);
3285	if (old_map->busy)
3286		vm_map_wait_busy(old_map);
3287	new_map = &vm2->vm_map;
3288	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3289	KASSERT(locked, ("vmspace_fork: lock failed"));
3290
3291	old_entry = old_map->header.next;
3292
3293	while (old_entry != &old_map->header) {
3294		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3295			panic("vm_map_fork: encountered a submap");
3296
3297		switch (old_entry->inheritance) {
3298		case VM_INHERIT_NONE:
3299			break;
3300
3301		case VM_INHERIT_SHARE:
3302			/*
3303			 * Clone the entry, creating the shared object if necessary.
3304			 */
3305			object = old_entry->object.vm_object;
3306			if (object == NULL) {
3307				object = vm_object_allocate(OBJT_DEFAULT,
3308					atop(old_entry->end - old_entry->start));
3309				old_entry->object.vm_object = object;
3310				old_entry->offset = 0;
3311				if (old_entry->cred != NULL) {
3312					object->cred = old_entry->cred;
3313					object->charge = old_entry->end -
3314					    old_entry->start;
3315					old_entry->cred = NULL;
3316				}
3317			}
3318
3319			/*
3320			 * Add the reference before calling vm_object_shadow
3321			 * to ensure that a shadow object is created.
3322			 */
3323			vm_object_reference(object);
3324			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3325				vm_object_shadow(&old_entry->object.vm_object,
3326				    &old_entry->offset,
3327				    old_entry->end - old_entry->start);
3328				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3329				/* Transfer the second reference too. */
3330				vm_object_reference(
3331				    old_entry->object.vm_object);
3332
3333				/*
3334				 * As in vm_map_simplify_entry(), the
3335				 * vnode lock will not be acquired in
3336				 * this call to vm_object_deallocate().
3337				 */
3338				vm_object_deallocate(object);
3339				object = old_entry->object.vm_object;
3340			}
3341			VM_OBJECT_WLOCK(object);
3342			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3343			if (old_entry->cred != NULL) {
3344				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3345				object->cred = old_entry->cred;
3346				object->charge = old_entry->end - old_entry->start;
3347				old_entry->cred = NULL;
3348			}
3349
3350			/*
3351			 * Assert the correct state of the vnode
3352			 * v_writecount while the object is locked, so
3353			 * that it is not relocked later just for the
3354			 * assertion.
3355			 */
3356			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3357			    object->type == OBJT_VNODE) {
3358				KASSERT(((struct vnode *)object->handle)->
3359				    v_writecount > 0,
3360				    ("vmspace_fork: v_writecount %p", object));
3361				KASSERT(object->un_pager.vnp.writemappings > 0,
3362				    ("vmspace_fork: vnp.writecount %p",
3363				    object));
3364			}
3365			VM_OBJECT_WUNLOCK(object);
3366
3367			/*
3368			 * Clone the entry, referencing the shared object.
3369			 */
3370			new_entry = vm_map_entry_create(new_map);
3371			*new_entry = *old_entry;
3372			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3373			    MAP_ENTRY_IN_TRANSITION);
3374			new_entry->wiring_thread = NULL;
3375			new_entry->wired_count = 0;
3376			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3377				vnode_pager_update_writecount(object,
3378				    new_entry->start, new_entry->end);
3379			}
3380
3381			/*
3382			 * Insert the entry into the new map -- we know we're
3383			 * inserting at the end of the new map.
3384			 */
3385			vm_map_entry_link(new_map, new_map->header.prev,
3386			    new_entry);
3387			vmspace_map_entry_forked(vm1, vm2, new_entry);
3388
3389			/*
3390			 * Update the physical map
3391			 */
3392			pmap_copy(new_map->pmap, old_map->pmap,
3393			    new_entry->start,
3394			    (old_entry->end - old_entry->start),
3395			    old_entry->start);
3396			break;
3397
3398		case VM_INHERIT_COPY:
3399			/*
3400			 * Clone the entry and link into the map.
3401			 */
3402			new_entry = vm_map_entry_create(new_map);
3403			*new_entry = *old_entry;
3404			/*
3405			 * Copied entry is COW over the old object.
3406			 */
3407			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3408			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3409			new_entry->wiring_thread = NULL;
3410			new_entry->wired_count = 0;
3411			new_entry->object.vm_object = NULL;
3412			new_entry->cred = NULL;
3413			vm_map_entry_link(new_map, new_map->header.prev,
3414			    new_entry);
3415			vmspace_map_entry_forked(vm1, vm2, new_entry);
3416			vm_map_copy_entry(old_map, new_map, old_entry,
3417			    new_entry, fork_charge);
3418			break;
3419		}
3420		old_entry = old_entry->next;
3421	}
3422	/*
3423	 * Use inlined vm_map_unlock() to postpone handling the deferred
3424	 * map entries, which cannot be done until both old_map and
3425	 * new_map locks are released.
3426	 */
3427	sx_xunlock(&old_map->lock);
3428	sx_xunlock(&new_map->lock);
3429	vm_map_process_deferred();
3430
3431	return (vm2);
3432}
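
/*
 * Illustrative sketch, not part of this file: fork handling would clone
 * the parent's vmspace and account the copy-on-write charge roughly as
 * in ("p1" is a placeholder for the parent process):
 *
 *	fork_charge = 0;
 *	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 */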
3433
3434int
3435vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3436    vm_prot_t prot, vm_prot_t max, int cow)
3437{
3438	vm_size_t growsize, init_ssize;
3439	rlim_t lmemlim, vmemlim;
3440	int rv;
3441
3442	growsize = sgrowsiz;
3443	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3444	vm_map_lock(map);
3445	PROC_LOCK(curproc);
3446	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3447	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3448	PROC_UNLOCK(curproc);
3449	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3450		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3451			rv = KERN_NO_SPACE;
3452			goto out;
3453		}
3454	}
3455	/* If we would blow our VMEM resource limit, no go */
3456	if (map->size + init_ssize > vmemlim) {
3457		rv = KERN_NO_SPACE;
3458		goto out;
3459	}
3460	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3461	    max, cow);
3462out:
3463	vm_map_unlock(map);
3464	return (rv);
3465}
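
/*
 * Illustrative sketch, not part of this file: reserving max_ssize bytes
 * at addrbos for a stack that grows downward within that range:
 *
 *	rv = vm_map_stack(map, addrbos, max_ssize, VM_PROT_ALL,
 *	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 */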
3466
3467static int
3468vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3469    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3470{
3471	vm_map_entry_t new_entry, prev_entry;
3472	vm_offset_t bot, top;
3473	vm_size_t init_ssize;
3474	int orient, rv;
3475
3476	/*
3477	 * The stack orientation is piggybacked with the cow argument.
3478	 * Extract it into orient and mask the cow argument so that we
3479	 * don't pass it around further.
3480	 * NOTE: We explicitly allow bi-directional stacks.
3481	 */
3482	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3483	KASSERT(orient != 0, ("No stack grow direction"));
3484
3485	if (addrbos < vm_map_min(map) ||
3486	    addrbos > vm_map_max(map) ||
3487	    addrbos + max_ssize < addrbos)
3488		return (KERN_NO_SPACE);
3489
3490	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3491
3492	/* If addr is already mapped, no go */
3493	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3494		return (KERN_NO_SPACE);
3495
3496	/*
3497	 * If we can't accommodate max_ssize in the current mapping, no go.
3498	 * However, we need to be aware that subsequent user mappings might
3499	 * map into the space we have reserved for stack, and currently this
3500	 * space is not protected.
3501	 *
3502	 * Hopefully we will at least detect this condition when we try to
3503	 * grow the stack.
3504	 */
3505	if ((prev_entry->next != &map->header) &&
3506	    (prev_entry->next->start < addrbos + max_ssize))
3507		return (KERN_NO_SPACE);
3508
3509	/*
3510	 * We initially map a stack of only init_ssize.  We will grow as
3511	 * needed later.  Depending on the orientation of the stack (i.e.
3512	 * the grow direction) we either map at the top of the range, the
3513	 * bottom of the range or in the middle.
3514	 *
3515	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3516	 * and cow to be 0.  Possibly we should eliminate these as input
3517	 * parameters, and just pass these values here in the insert call.
3518	 */
3519	if (orient == MAP_STACK_GROWS_DOWN)
3520		bot = addrbos + max_ssize - init_ssize;
3521	else if (orient == MAP_STACK_GROWS_UP)
3522		bot = addrbos;
3523	else
3524		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3525	top = bot + init_ssize;
3526	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3527
3528	/* Now set the avail_ssize amount. */
3529	if (rv == KERN_SUCCESS) {
3530		if (prev_entry != &map->header)
3531			vm_map_clip_end(map, prev_entry, bot);
3532		new_entry = prev_entry->next;
3533		if (new_entry->end != top || new_entry->start != bot)
3534			panic("Bad entry start/end for new stack entry");
3535
3536		new_entry->avail_ssize = max_ssize - init_ssize;
3537		if (orient & MAP_STACK_GROWS_DOWN)
3538			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3539		if (orient & MAP_STACK_GROWS_UP)
3540			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3541	}
3542
3543	return (rv);
3544}
3545
3546static int stack_guard_page = 0;
3547TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3548SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3549    &stack_guard_page, 0,
3550    "Insert stack guard page ahead of the growable segments.");
3551
3552/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3553 * desired address is already mapped, or if we successfully grow
3554 * the stack.  Also returns KERN_SUCCESS if addr is outside the
3555 * stack range (this is strange, but preserves compatibility with
3556 * the grow function in vm_machdep.c).
3557 */
3558int
3559vm_map_growstack(struct proc *p, vm_offset_t addr)
3560{
3561	vm_map_entry_t next_entry, prev_entry;
3562	vm_map_entry_t new_entry, stack_entry;
3563	struct vmspace *vm = p->p_vmspace;
3564	vm_map_t map = &vm->vm_map;
3565	vm_offset_t end;
3566	vm_size_t growsize;
3567	size_t grow_amount, max_grow;
3568	rlim_t lmemlim, stacklim, vmemlim;
3569	int is_procstack, rv;
3570	struct ucred *cred;
3571#ifdef notyet
3572	uint64_t limit;
3573#endif
3574#ifdef RACCT
3575	int error;
3576#endif
3577
3578Retry:
3579	PROC_LOCK(p);
3580	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3581	stacklim = lim_cur(p, RLIMIT_STACK);
3582	vmemlim = lim_cur(p, RLIMIT_VMEM);
3583	PROC_UNLOCK(p);
3584
3585	vm_map_lock_read(map);
3586
3587	/* If addr is already in the entry range, no need to grow. */
3588	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3589		vm_map_unlock_read(map);
3590		return (KERN_SUCCESS);
3591	}
3592
3593	next_entry = prev_entry->next;
3594	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3595		/*
3596		 * This entry does not grow upwards. Since the address lies
3597		 * beyond this entry, the next entry (if one exists) has to
3598		 * be a downward growable entry. The entry list header is
3599		 * never a growable entry, so it suffices to check the flags.
3600		 */
3601		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3602			vm_map_unlock_read(map);
3603			return (KERN_SUCCESS);
3604		}
3605		stack_entry = next_entry;
3606	} else {
3607		/*
3608		 * This entry grows upward. If the next entry does not at
3609		 * least grow downwards, this is the entry we need to grow;
3610		 * otherwise we have two possible choices and we have to
3611		 * select one.
3612		 */
3613		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3614			/*
3615			 * We have two choices; grow the entry closest to
3616			 * the address to minimize the amount of growth.
3617			 */
3618			if (addr - prev_entry->end <= next_entry->start - addr)
3619				stack_entry = prev_entry;
3620			else
3621				stack_entry = next_entry;
3622		} else
3623			stack_entry = prev_entry;
3624	}
3625
3626	if (stack_entry == next_entry) {
3627		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3628		KASSERT(addr < stack_entry->start, ("foo"));
3629		end = (prev_entry != &map->header) ? prev_entry->end :
3630		    stack_entry->start - stack_entry->avail_ssize;
3631		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3632		max_grow = stack_entry->start - end;
3633	} else {
3634		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3635		KASSERT(addr >= stack_entry->end, ("foo"));
3636		end = (next_entry != &map->header) ? next_entry->start :
3637		    stack_entry->end + stack_entry->avail_ssize;
3638		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3639		max_grow = end - stack_entry->end;
3640	}
3641
3642	if (grow_amount > stack_entry->avail_ssize) {
3643		vm_map_unlock_read(map);
3644		return (KERN_NO_SPACE);
3645	}
3646
3647	/*
3648	 * If there is no longer enough space between the entries, fail and
3649	 * adjust the available space.  Note: this should only happen if the
3650	 * user has mapped into the stack area after the stack was created,
3651	 * and is probably an error.
3652	 *
3653	 * This also effectively destroys any guard page the user might have
3654	 * intended by limiting the stack size.
3655	 */
3656	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3657		if (vm_map_lock_upgrade(map))
3658			goto Retry;
3659
3660		stack_entry->avail_ssize = max_grow;
3661
3662		vm_map_unlock(map);
3663		return (KERN_NO_SPACE);
3664	}
3665
3666	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr &&
3667	    addr < (vm_offset_t)p->p_sysent->sv_usrstack) ? 1 : 0;
3668
3669	/*
3670	 * If this is the main process stack, see if we're over the stack
3671	 * limit.
3672	 */
3673	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3674		vm_map_unlock_read(map);
3675		return (KERN_NO_SPACE);
3676	}
3677#ifdef RACCT
3678	if (racct_enable) {
3679		PROC_LOCK(p);
3680		if (is_procstack && racct_set(p, RACCT_STACK,
3681		    ctob(vm->vm_ssize) + grow_amount)) {
3682			PROC_UNLOCK(p);
3683			vm_map_unlock_read(map);
3684			return (KERN_NO_SPACE);
3685		}
3686		PROC_UNLOCK(p);
3687	}
3688#endif
3689
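	/*
	 * Illustrative example (editor's addition, assuming a 4 kB page and
	 * the customary 128 kB sgrowsiz default): a fault one page below the
	 * stack yields a grow_amount of 4 kB above, which is rounded up to
	 * 128 kB here and then clamped to the remaining avail_ssize and to
	 * the process's RLIMIT_STACK.
	 */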
3690	/* Round the grow amount up to a multiple of sgrowsiz. */
3691	growsize = sgrowsiz;
3692	grow_amount = roundup(grow_amount, growsize);
3693	if (grow_amount > stack_entry->avail_ssize)
3694		grow_amount = stack_entry->avail_ssize;
3695	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3696		grow_amount = trunc_page((vm_size_t)stacklim) -
3697		    ctob(vm->vm_ssize);
3698	}
3699#ifdef notyet
3700	PROC_LOCK(p);
3701	limit = racct_get_available(p, RACCT_STACK);
3702	PROC_UNLOCK(p);
3703	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3704		grow_amount = limit - ctob(vm->vm_ssize);
3705#endif
3706	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3707		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3708			vm_map_unlock_read(map);
3709			rv = KERN_NO_SPACE;
3710			goto out;
3711		}
3712#ifdef RACCT
3713		if (racct_enable) {
3714			PROC_LOCK(p);
3715			if (racct_set(p, RACCT_MEMLOCK,
3716			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3717				PROC_UNLOCK(p);
3718				vm_map_unlock_read(map);
3719				rv = KERN_NO_SPACE;
3720				goto out;
3721			}
3722			PROC_UNLOCK(p);
3723		}
3724#endif
3725	}
3726	/* If we would blow our VMEM resource limit, no go */
3727	if (map->size + grow_amount > vmemlim) {
3728		vm_map_unlock_read(map);
3729		rv = KERN_NO_SPACE;
3730		goto out;
3731	}
3732#ifdef RACCT
3733	if (racct_enable) {
3734		PROC_LOCK(p);
3735		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3736			PROC_UNLOCK(p);
3737			vm_map_unlock_read(map);
3738			rv = KERN_NO_SPACE;
3739			goto out;
3740		}
3741		PROC_UNLOCK(p);
3742	}
3743#endif
3744
3745	if (vm_map_lock_upgrade(map))
3746		goto Retry;
3747
3748	if (stack_entry == next_entry) {
3749		/*
3750		 * Growing downward.
3751		 */
3752		/* Get the preliminary new entry start value */
3753		addr = stack_entry->start - grow_amount;
3754
3755		/*
3756		 * If this puts us into the previous entry, cut back our
3757		 * growth to the available space. Also, see the note above.
3758		 */
3759		if (addr < end) {
3760			stack_entry->avail_ssize = max_grow;
3761			addr = end;
3762			if (stack_guard_page)
3763				addr += PAGE_SIZE;
3764		}
3765
3766		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3767		    next_entry->protection, next_entry->max_protection, 0);
3768
3769		/* Adjust the available stack space by the amount we grew. */
3770		if (rv == KERN_SUCCESS) {
3771			if (prev_entry != &map->header)
3772				vm_map_clip_end(map, prev_entry, addr);
3773			new_entry = prev_entry->next;
3774			KASSERT(new_entry == stack_entry->prev, ("foo"));
3775			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3776			KASSERT(new_entry->start == addr, ("foo"));
3777			grow_amount = new_entry->end - new_entry->start;
3778			new_entry->avail_ssize = stack_entry->avail_ssize -
3779			    grow_amount;
3780			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3781			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3782		}
3783	} else {
3784		/*
3785		 * Growing upward.
3786		 */
3787		addr = stack_entry->end + grow_amount;
3788
3789		/*
3790		 * If this puts us into the next entry, cut back our growth
3791		 * to the available space. Also, see the note above.
3792		 */
3793		if (addr > end) {
3794			stack_entry->avail_ssize = end - stack_entry->end;
3795			addr = end;
3796			if (stack_guard_page)
3797				addr -= PAGE_SIZE;
3798		}
3799
3800		grow_amount = addr - stack_entry->end;
3801		cred = stack_entry->cred;
3802		if (cred == NULL && stack_entry->object.vm_object != NULL)
3803			cred = stack_entry->object.vm_object->cred;
3804		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3805			rv = KERN_NO_SPACE;
3806		/* Grow the underlying object if applicable. */
3807		else if (stack_entry->object.vm_object == NULL ||
3808		    vm_object_coalesce(stack_entry->object.vm_object,
3809		    stack_entry->offset,
3810		    (vm_size_t)(stack_entry->end - stack_entry->start),
3811		    (vm_size_t)grow_amount, cred != NULL)) {
3812			map->size += (addr - stack_entry->end);
3813			/* Update the current entry. */
3814			stack_entry->end = addr;
3815			stack_entry->avail_ssize -= grow_amount;
3816			vm_map_entry_resize_free(map, stack_entry);
3817			rv = KERN_SUCCESS;
3818
3819			if (next_entry != &map->header)
3820				vm_map_clip_start(map, next_entry, addr);
3821		} else
3822			rv = KERN_FAILURE;
3823	}
3824
3825	if (rv == KERN_SUCCESS && is_procstack)
3826		vm->vm_ssize += btoc(grow_amount);
3827
3828	vm_map_unlock(map);
3829
3830	/*
3831	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3832	 */
3833	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3834		vm_map_wire(map,
3835		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3836		    (stack_entry == next_entry) ? stack_entry->start : addr,
3837		    (p->p_flag & P_SYSTEM)
3838		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3839		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3840	}
3841
3842out:
3843#ifdef RACCT
3844	if (racct_enable && rv != KERN_SUCCESS) {
3845		PROC_LOCK(p);
3846		error = racct_set(p, RACCT_VMEM, map->size);
3847		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3848		if (!old_mlock) {
3849			error = racct_set(p, RACCT_MEMLOCK,
3850			    ptoa(pmap_wired_count(map->pmap)));
3851			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3852		}
3853		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3854		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3855		PROC_UNLOCK(p);
3856	}
3857#endif
3858
3859	return (rv);
3860}
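
#if 0
/*
 * Hypothetical caller sketch (editor's addition, hedged): some
 * machine-dependent page-fault handlers grow the stack before handing a
 * user fault to vm_fault().  Variable names (p, va, ftype, map) are
 * placeholders; the real code differs by architecture and is not part of
 * this file.
 */
	if (va < VM_MAXUSER_ADDRESS &&
	    vm_map_growstack(p, va) != KERN_SUCCESS)
		return (SIGSEGV);
	rv = vm_fault(map, trunc_page(va), ftype, VM_FAULT_NORMAL);
#endif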
3861
3862/*
3863 * Unshare the specified VM space for exec.  If other processes are
3864 * mapped to it, then create a new one.  The new vmspace has no mappings.
3865 */
3866int
3867vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3868{
3869	struct vmspace *oldvmspace = p->p_vmspace;
3870	struct vmspace *newvmspace;
3871
3872	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3873	    ("vmspace_exec recursed"));
3874	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3875	if (newvmspace == NULL)
3876		return (ENOMEM);
3877	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3878	/*
3879	 * This code is written like this for prototype purposes.  The
3880	 * goal is to avoid running down the vmspace here, but let the
3881	 * other processes that are still using the vmspace finally
3882	 * run it down.  Even though there is little or no chance of blocking
3883	 * here, it is a good idea to keep this form for future mods.
3884	 */
3885	PROC_VMSPACE_LOCK(p);
3886	p->p_vmspace = newvmspace;
3887	PROC_VMSPACE_UNLOCK(p);
3888	if (p == curthread->td_proc)
3889		pmap_activate(curthread);
3890	curthread->td_pflags |= TDP_EXECVMSPC;
3891	return (0);
3892}
3893
3894/*
3895 * Unshare the specified VM space for forcing COW.  This
3896 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3897 */
3898int
3899vmspace_unshare(struct proc *p)
3900{
3901	struct vmspace *oldvmspace = p->p_vmspace;
3902	struct vmspace *newvmspace;
3903	vm_ooffset_t fork_charge;
3904
3905	if (oldvmspace->vm_refcnt == 1)
3906		return (0);
3907	fork_charge = 0;
3908	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3909	if (newvmspace == NULL)
3910		return (ENOMEM);
3911	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3912		vmspace_free(newvmspace);
3913		return (ENOMEM);
3914	}
3915	PROC_VMSPACE_LOCK(p);
3916	p->p_vmspace = newvmspace;
3917	PROC_VMSPACE_UNLOCK(p);
3918	if (p == curthread->td_proc)
3919		pmap_activate(curthread);
3920	vmspace_free(oldvmspace);
3921	return (0);
3922}
3923
3924/*
3925 *	vm_map_lookup:
3926 *
3927 *	Finds the VM object, offset, and
3928 *	protection for a given virtual address in the
3929 *	specified map, assuming a page fault of the
3930 *	type specified.
3931 *
3932 *	Leaves the map in question locked for read; return
3933 *	values are guaranteed until a vm_map_lookup_done
3934 *	call is performed.  Note that the map argument
3935 *	is in/out; the returned map must be used in
3936 *	the call to vm_map_lookup_done.
3937 *
3938 *	A handle (out_entry) is returned for use in
3939 *	vm_map_lookup_done, to make that fast.
3940 *
3941 *	If a lookup is requested with "write protection"
3942 *	specified, the map may be changed to perform virtual
3943 *	copying operations, although the data referenced will
3944 *	remain the same.
3945 */
3946int
3947vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3948	      vm_offset_t vaddr,
3949	      vm_prot_t fault_typea,
3950	      vm_map_entry_t *out_entry,	/* OUT */
3951	      vm_object_t *object,		/* OUT */
3952	      vm_pindex_t *pindex,		/* OUT */
3953	      vm_prot_t *out_prot,		/* OUT */
3954	      boolean_t *wired)			/* OUT */
3955{
3956	vm_map_entry_t entry;
3957	vm_map_t map = *var_map;
3958	vm_prot_t prot;
3959	vm_prot_t fault_type = fault_typea;
3960	vm_object_t eobject;
3961	vm_size_t size;
3962	struct ucred *cred;
3963
3964RetryLookup:;
3965
3966	vm_map_lock_read(map);
3967
3968	/*
3969	 * Lookup the faulting address.
3970	 */
3971	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3972		vm_map_unlock_read(map);
3973		return (KERN_INVALID_ADDRESS);
3974	}
3975
3976	entry = *out_entry;
3977
3978	/*
3979	 * Handle submaps.
3980	 */
3981	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3982		vm_map_t old_map = map;
3983
3984		*var_map = map = entry->object.sub_map;
3985		vm_map_unlock_read(old_map);
3986		goto RetryLookup;
3987	}
3988
3989	/*
3990	 * Check whether this task is allowed to have this page.
3991	 */
3992	prot = entry->protection;
3993	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3994	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3995		vm_map_unlock_read(map);
3996		return (KERN_PROTECTION_FAILURE);
3997	}
3998	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
3999	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4000	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4001	    ("entry %p flags %x", entry, entry->eflags));
4002	if ((fault_typea & VM_PROT_COPY) != 0 &&
4003	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4004	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4005		vm_map_unlock_read(map);
4006		return (KERN_PROTECTION_FAILURE);
4007	}
4008
4009	/*
4010	 * If this page is not pageable, we have to get it for all possible
4011	 * accesses.
4012	 */
4013	*wired = (entry->wired_count != 0);
4014	if (*wired)
4015		fault_type = entry->protection;
4016	size = entry->end - entry->start;
4017	/*
4018	 * If the entry was copy-on-write, we either ...
4019	 */
4020	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4021		/*
4022		 * If we want to write the page, we may as well handle that
4023		 * now since we've got the map locked.
4024		 *
4025		 * If we don't need to write the page, we just demote the
4026		 * permissions allowed.
4027		 */
4028		if ((fault_type & VM_PROT_WRITE) != 0 ||
4029		    (fault_typea & VM_PROT_COPY) != 0) {
4030			/*
4031			 * Make a new object, and place it in the object
4032			 * chain.  Note that no new references have appeared
4033			 * -- one just moved from the map to the new
4034			 * object.
4035			 */
4036			if (vm_map_lock_upgrade(map))
4037				goto RetryLookup;
4038
4039			if (entry->cred == NULL) {
4040				/*
4041				 * The debugger owner is charged for
4042				 * the memory.
4043				 */
4044				cred = curthread->td_ucred;
4045				crhold(cred);
4046				if (!swap_reserve_by_cred(size, cred)) {
4047					crfree(cred);
4048					vm_map_unlock(map);
4049					return (KERN_RESOURCE_SHORTAGE);
4050				}
4051				entry->cred = cred;
4052			}
4053			vm_object_shadow(&entry->object.vm_object,
4054			    &entry->offset, size);
4055			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4056			eobject = entry->object.vm_object;
4057			if (eobject->cred != NULL) {
4058				/*
4059				 * The object was not shadowed.
4060				 */
4061				swap_release_by_cred(size, entry->cred);
4062				crfree(entry->cred);
4063				entry->cred = NULL;
4064			} else if (entry->cred != NULL) {
4065				VM_OBJECT_WLOCK(eobject);
4066				eobject->cred = entry->cred;
4067				eobject->charge = size;
4068				VM_OBJECT_WUNLOCK(eobject);
4069				entry->cred = NULL;
4070			}
4071
4072			vm_map_lock_downgrade(map);
4073		} else {
4074			/*
4075			 * We're attempting to read a copy-on-write page --
4076			 * don't allow writes.
4077			 */
4078			prot &= ~VM_PROT_WRITE;
4079		}
4080	}
4081
4082	/*
4083	 * Create an object if necessary.
4084	 */
4085	if (entry->object.vm_object == NULL &&
4086	    !map->system_map) {
4087		if (vm_map_lock_upgrade(map))
4088			goto RetryLookup;
4089		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4090		    atop(size));
4091		entry->offset = 0;
4092		if (entry->cred != NULL) {
4093			VM_OBJECT_WLOCK(entry->object.vm_object);
4094			entry->object.vm_object->cred = entry->cred;
4095			entry->object.vm_object->charge = size;
4096			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4097			entry->cred = NULL;
4098		}
4099		vm_map_lock_downgrade(map);
4100	}
4101
4102	/*
4103	 * Return the object/offset from this entry.  If the entry was
4104	 * copy-on-write or empty, it has been fixed up.
4105	 */
4106	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4107	*object = entry->object.vm_object;
4108
4109	*out_prot = prot;
4110	return (KERN_SUCCESS);
4111}
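
#if 0
/*
 * Illustrative call pattern (editor's addition, hedged): a caller such as
 * the fault handler pairs every successful vm_map_lookup() with a
 * vm_map_lookup_done() on the map handle the lookup returned.  Variable
 * names are placeholders.
 */
	result = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
	    &pindex, &prot, &wired);
	if (result != KERN_SUCCESS)
		return (result);
	/* ... operate on object/pindex while the map stays read-locked ... */
	vm_map_lookup_done(map, entry);
#endif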
4112
4113/*
4114 *	vm_map_lookup_locked:
4115 *
4116 *	Lookup the faulting address.  A version of vm_map_lookup that returns
4117 *      KERN_FAILURE instead of blocking on map lock or memory allocation.
4118 */
4119int
4120vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4121		     vm_offset_t vaddr,
4122		     vm_prot_t fault_typea,
4123		     vm_map_entry_t *out_entry,	/* OUT */
4124		     vm_object_t *object,	/* OUT */
4125		     vm_pindex_t *pindex,	/* OUT */
4126		     vm_prot_t *out_prot,	/* OUT */
4127		     boolean_t *wired)		/* OUT */
4128{
4129	vm_map_entry_t entry;
4130	vm_map_t map = *var_map;
4131	vm_prot_t prot;
4132	vm_prot_t fault_type = fault_typea;
4133
4134	/*
4135	 * Lookup the faulting address.
4136	 */
4137	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4138		return (KERN_INVALID_ADDRESS);
4139
4140	entry = *out_entry;
4141
4142	/*
4143	 * Fail if the entry refers to a submap.
4144	 */
4145	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4146		return (KERN_FAILURE);
4147
4148	/*
4149	 * Check whether this task is allowed to have this page.
4150	 */
4151	prot = entry->protection;
4152	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4153	if ((fault_type & prot) != fault_type)
4154		return (KERN_PROTECTION_FAILURE);
4155
4156	/*
4157	 * If this page is not pageable, we have to get it for all possible
4158	 * accesses.
4159	 */
4160	*wired = (entry->wired_count != 0);
4161	if (*wired)
4162		fault_type = entry->protection;
4163
4164	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4165		/*
4166		 * Fail if the entry was copy-on-write for a write fault.
4167		 */
4168		if (fault_type & VM_PROT_WRITE)
4169			return (KERN_FAILURE);
4170		/*
4171		 * We're attempting to read a copy-on-write page --
4172		 * don't allow writes.
4173		 */
4174		prot &= ~VM_PROT_WRITE;
4175	}
4176
4177	/*
4178	 * Fail if an object should be created.
4179	 */
4180	if (entry->object.vm_object == NULL && !map->system_map)
4181		return (KERN_FAILURE);
4182
4183	/*
4184	 * Return the object/offset from this entry.  If the entry was
4185	 * copy-on-write or empty, it has been fixed up.
4186	 */
4187	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4188	*object = entry->object.vm_object;
4189
4190	*out_prot = prot;
4191	return (KERN_SUCCESS);
4192}
4193
4194/*
4195 *	vm_map_lookup_done:
4196 *
4197 *	Releases locks acquired by a vm_map_lookup
4198 *	(according to the handle returned by that lookup).
4199 */
4200void
4201vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4202{
4203	/*
4204	 * Unlock the main-level map
4205	 */
4206	vm_map_unlock_read(map);
4207}
4208
4209#include "opt_ddb.h"
4210#ifdef DDB
4211#include <sys/kernel.h>
4212
4213#include <ddb/ddb.h>
4214
4215static void
4216vm_map_print(vm_map_t map)
4217{
4218	vm_map_entry_t entry;
4219
4220	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4221	    (void *)map,
4222	    (void *)map->pmap, map->nentries, map->timestamp);
4223
4224	db_indent += 2;
4225	for (entry = map->header.next; entry != &map->header;
4226	    entry = entry->next) {
4227		db_iprintf("map entry %p: start=%p, end=%p\n",
4228		    (void *)entry, (void *)entry->start, (void *)entry->end);
4229		{
4230			static char *inheritance_name[4] =
4231			{"share", "copy", "none", "donate_copy"};
4232
4233			db_iprintf(" prot=%x/%x/%s",
4234			    entry->protection,
4235			    entry->max_protection,
4236			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4237			if (entry->wired_count != 0)
4238				db_printf(", wired");
4239		}
4240		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4241			db_printf(", share=%p, offset=0x%jx\n",
4242			    (void *)entry->object.sub_map,
4243			    (uintmax_t)entry->offset);
4244			if ((entry->prev == &map->header) ||
4245			    (entry->prev->object.sub_map !=
4246				entry->object.sub_map)) {
4247				db_indent += 2;
4248				vm_map_print((vm_map_t)entry->object.sub_map);
4249				db_indent -= 2;
4250			}
4251		} else {
4252			if (entry->cred != NULL)
4253				db_printf(", ruid %d", entry->cred->cr_ruid);
4254			db_printf(", object=%p, offset=0x%jx",
4255			    (void *)entry->object.vm_object,
4256			    (uintmax_t)entry->offset);
4257			if (entry->object.vm_object && entry->object.vm_object->cred)
4258				db_printf(", obj ruid %d charge %jx",
4259				    entry->object.vm_object->cred->cr_ruid,
4260				    (uintmax_t)entry->object.vm_object->charge);
4261			if (entry->eflags & MAP_ENTRY_COW)
4262				db_printf(", copy (%s)",
4263				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4264			db_printf("\n");
4265
4266			if ((entry->prev == &map->header) ||
4267			    (entry->prev->object.vm_object !=
4268				entry->object.vm_object)) {
4269				db_indent += 2;
4270				vm_object_print((db_expr_t)(intptr_t)
4271						entry->object.vm_object,
4272						0, 0, (char *)0);
4273				db_indent -= 2;
4274			}
4275		}
4276	}
4277	db_indent -= 2;
4278}
4279
4280DB_SHOW_COMMAND(map, map)
4281{
4282
4283	if (!have_addr) {
4284		db_printf("usage: show map <addr>\n");
4285		return;
4286	}
4287	vm_map_print((vm_map_t)addr);
4288}
4289
4290DB_SHOW_COMMAND(procvm, procvm)
4291{
4292	struct proc *p;
4293
4294	if (have_addr) {
4295		p = (struct proc *) addr;
4296	} else {
4297		p = curproc;
4298	}
4299
4300	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4301	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4302	    (void *)vmspace_pmap(p->p_vmspace));
4303
4304	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4305}
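
/*
 * Illustrative DDB usage (editor's note): from the in-kernel debugger the
 * commands defined above are invoked as "show map <addr>" to print a
 * vm_map by address, and "show procvm [<struct proc addr>]" to dump the
 * map of the given or current process.
 */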
4306
4307#endif /* DDB */
4308