vm_map.c revision 267059
1/*-
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53 *  School of Computer Science
54 *  Carnegie Mellon University
55 *  Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61/*
62 *	Virtual memory mapping module.
63 */
64
65#include <sys/cdefs.h>
66__FBSDID("$FreeBSD: stable/10/sys/vm/vm_map.c 267059 2014-06-04 15:18:46Z kib $");
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/mutex.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76#include <sys/mman.h>
77#include <sys/vnode.h>
78#include <sys/racct.h>
79#include <sys/resourcevar.h>
80#include <sys/rwlock.h>
81#include <sys/file.h>
82#include <sys/sysctl.h>
83#include <sys/sysent.h>
84#include <sys/shm.h>
85
86#include <vm/vm.h>
87#include <vm/vm_param.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_object.h>
92#include <vm/vm_pager.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_extern.h>
95#include <vm/vnode_pager.h>
96#include <vm/swap_pager.h>
97#include <vm/uma.h>
98
99/*
100 *	Virtual memory maps provide for the mapping, protection,
101 *	and sharing of virtual memory objects.  In addition,
102 *	this module provides for an efficient virtual copy of
103 *	memory from one map to another.
104 *
105 *	Synchronization is required prior to most operations.
106 *
107 *	Maps consist of an ordered doubly-linked list of simple
108 *	entries; a self-adjusting binary search tree of these
109 *	entries is used to speed up lookups.
110 *
111 *	Since portions of maps are specified by start/end addresses,
112 *	which may not align with existing map entries, all
113 *	routines merely "clip" entries to these start/end values.
114 *	[That is, an entry is split into two, bordering at a
115 *	start or end value.]  Note that these clippings may not
116 *	always be necessary (as the two resulting entries are then
117 *	not changed); however, the clipping is done for convenience.
118 *
119 *	As mentioned above, virtual copy operations are performed
120 *	by copying VM object references from one map to
121 *	another, and then marking both regions as copy-on-write.
122 */
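/*
 * Clipping, by example (illustrative only, not part of the original
 * file): changing the protection of [0x3000, 0x4000) inside an entry
 * spanning [0x2000, 0x6000) first splits off [0x2000, 0x3000), then
 * splits off [0x4000, 0x6000), leaving an exact [0x3000, 0x4000)
 * entry for the operation to modify.
 */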
123
124static struct mtx map_sleep_mtx;
125static uma_zone_t mapentzone;
126static uma_zone_t kmapentzone;
127static uma_zone_t mapzone;
128static uma_zone_t vmspace_zone;
129static int vmspace_zinit(void *mem, int size, int flags);
130static int vm_map_zinit(void *mem, int size, int flags);
131static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
132    vm_offset_t max);
133static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
134static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
135#ifdef INVARIANTS
136static void vm_map_zdtor(void *mem, int size, void *arg);
137static void vmspace_zdtor(void *mem, int size, void *arg);
138#endif
139
140#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
141    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
142     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
143
144/*
145 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
146 * stable.
147 */
148#define PROC_VMSPACE_LOCK(p) do { } while (0)
149#define PROC_VMSPACE_UNLOCK(p) do { } while (0)
150
151/*
152 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
153 *
154 *	Asserts that the starting and ending region
155 *	addresses fall within the valid range of the map.
156 */
157#define	VM_MAP_RANGE_CHECK(map, start, end)		\
158		{					\
159		if (start < vm_map_min(map))		\
160			start = vm_map_min(map);	\
161		if (end > vm_map_max(map))		\
162			end = vm_map_max(map);		\
163		if (start > end)			\
164			start = end;			\
165		}
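/*
 * A minimal sketch of the clamping behavior (hypothetical values):
 * with vm_map_min(map) == 0x1000 and vm_map_max(map) == 0x9000,
 *
 *	vm_offset_t start = 0x0800, end = 0xa000;
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *
 * leaves start == 0x1000 and end == 0x9000; a request entirely outside
 * the map degenerates to an empty range with start == end.
 */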
166
167/*
168 *	vm_map_startup:
169 *
170 *	Initialize the vm_map module.  Must be called before
171 *	any other vm_map routines.
172 *
173 *	Map and entry structures are allocated from the general
174 *	purpose memory pool with some exceptions:
175 *
176 *	- The kernel map and kmem submap are allocated statically.
177 *	- Kernel map entries are allocated out of a static pool.
178 *
179 *	These restrictions are necessary since malloc() uses the
180 *	maps and requires map entries.
181 */
182
183void
184vm_map_startup(void)
185{
186	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
187	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
188#ifdef INVARIANTS
189	    vm_map_zdtor,
190#else
191	    NULL,
192#endif
193	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
194	uma_prealloc(mapzone, MAX_KMAP);
195	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
196	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
197	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
198	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
199	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
200	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
201#ifdef INVARIANTS
202	    vmspace_zdtor,
203#else
204	    NULL,
205#endif
206	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
207}
208
209static int
210vmspace_zinit(void *mem, int size, int flags)
211{
212	struct vmspace *vm;
213
214	vm = (struct vmspace *)mem;
215
216	vm->vm_map.pmap = NULL;
217	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
218	PMAP_LOCK_INIT(vmspace_pmap(vm));
219	return (0);
220}
221
222static int
223vm_map_zinit(void *mem, int size, int flags)
224{
225	vm_map_t map;
226
227	map = (vm_map_t)mem;
228	memset(map, 0, sizeof(*map));
229	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
230	sx_init(&map->lock, "vm map (user)");
231	return (0);
232}
233
234#ifdef INVARIANTS
235static void
236vmspace_zdtor(void *mem, int size, void *arg)
237{
238	struct vmspace *vm;
239
240	vm = (struct vmspace *)mem;
241
242	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
243}
244static void
245vm_map_zdtor(void *mem, int size, void *arg)
246{
247	vm_map_t map;
248
249	map = (vm_map_t)mem;
250	KASSERT(map->nentries == 0,
251	    ("map %p nentries == %d on free.",
252	    map, map->nentries));
253	KASSERT(map->size == 0,
254	    ("map %p size == %lu on free.",
255	    map, (unsigned long)map->size));
256}
257#endif	/* INVARIANTS */
258
259/*
260 * Allocate a vmspace structure, including a vm_map and pmap,
261 * and initialize those structures.  The refcnt is set to 1.
262 *
263 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
264 */
265struct vmspace *
266vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
267{
268	struct vmspace *vm;
269
270	vm = uma_zalloc(vmspace_zone, M_WAITOK);
271
272	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
273
274	if (pinit == NULL)
275		pinit = &pmap_pinit;
276
277	if (!pinit(vmspace_pmap(vm))) {
278		uma_zfree(vmspace_zone, vm);
279		return (NULL);
280	}
281	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
282	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
283	vm->vm_refcnt = 1;
284	vm->vm_shm = NULL;
285	vm->vm_swrss = 0;
286	vm->vm_tsize = 0;
287	vm->vm_dsize = 0;
288	vm->vm_ssize = 0;
289	vm->vm_taddr = 0;
290	vm->vm_daddr = 0;
291	vm->vm_maxsaddr = 0;
292	return (vm);
293}
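/*
 * Minimal usage sketch (hypothetical caller, not from this file):
 * allocate a vmspace covering the usual user range with the default
 * pmap initializer, then drop the single reference again.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, NULL);
 *	if (vm != NULL)
 *		vmspace_free(vm);
 */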
294
295static void
296vmspace_container_reset(struct proc *p)
297{
298
299#ifdef RACCT
300	PROC_LOCK(p);
301	racct_set(p, RACCT_DATA, 0);
302	racct_set(p, RACCT_STACK, 0);
303	racct_set(p, RACCT_RSS, 0);
304	racct_set(p, RACCT_MEMLOCK, 0);
305	racct_set(p, RACCT_VMEM, 0);
306	PROC_UNLOCK(p);
307#endif
308}
309
310static inline void
311vmspace_dofree(struct vmspace *vm)
312{
313
314	CTR1(KTR_VM, "vmspace_free: %p", vm);
315
316	/*
317	 * Make sure any SysV shm is freed, it might not have been in
318	 * exit1().
319	 */
320	shmexit(vm);
321
322	/*
323	 * Lock the map, to wait out all other references to it.
324	 * Delete all of the mappings and pages they hold, then call
325	 * the pmap module to reclaim anything left.
326	 */
327	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
328	    vm->vm_map.max_offset);
329
330	pmap_release(vmspace_pmap(vm));
331	vm->vm_map.pmap = NULL;
332	uma_zfree(vmspace_zone, vm);
333}
334
335void
336vmspace_free(struct vmspace *vm)
337{
338
339	if (vm->vm_refcnt == 0)
340		panic("vmspace_free: attempt to free already freed vmspace");
341
342	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
343		vmspace_dofree(vm);
344}
345
346void
347vmspace_exitfree(struct proc *p)
348{
349	struct vmspace *vm;
350
351	PROC_VMSPACE_LOCK(p);
352	vm = p->p_vmspace;
353	p->p_vmspace = NULL;
354	PROC_VMSPACE_UNLOCK(p);
355	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
356	vmspace_free(vm);
357}
358
359void
360vmspace_exit(struct thread *td)
361{
362	int refcnt;
363	struct vmspace *vm;
364	struct proc *p;
365
366	/*
367	 * Release user portion of address space.
368	 * This releases references to vnodes,
369	 * which could cause I/O if the file has been unlinked.
370	 * Need to do this early enough that we can still sleep.
371	 *
372	 * The last exiting process to reach this point releases as
373	 * much of the environment as it can. vmspace_dofree() is the
374	 * slower fallback in case another process had a temporary
375	 * reference to the vmspace.
376	 */
377
378	p = td->td_proc;
379	vm = p->p_vmspace;
380	atomic_add_int(&vmspace0.vm_refcnt, 1);
381	do {
382		refcnt = vm->vm_refcnt;
383		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
384			/* Switch now since other proc might free vmspace */
385			PROC_VMSPACE_LOCK(p);
386			p->p_vmspace = &vmspace0;
387			PROC_VMSPACE_UNLOCK(p);
388			pmap_activate(td);
389		}
390	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
391	if (refcnt == 1) {
392		if (p->p_vmspace != vm) {
393			/* vmspace not yet freed, switch back */
394			PROC_VMSPACE_LOCK(p);
395			p->p_vmspace = vm;
396			PROC_VMSPACE_UNLOCK(p);
397			pmap_activate(td);
398		}
399		pmap_remove_pages(vmspace_pmap(vm));
400		/* Switch now since this proc will free vmspace */
401		PROC_VMSPACE_LOCK(p);
402		p->p_vmspace = &vmspace0;
403		PROC_VMSPACE_UNLOCK(p);
404		pmap_activate(td);
405		vmspace_dofree(vm);
406	}
407	vmspace_container_reset(p);
408}
409
410/* Acquire reference to vmspace owned by another process. */
411
412struct vmspace *
413vmspace_acquire_ref(struct proc *p)
414{
415	struct vmspace *vm;
416	int refcnt;
417
418	PROC_VMSPACE_LOCK(p);
419	vm = p->p_vmspace;
420	if (vm == NULL) {
421		PROC_VMSPACE_UNLOCK(p);
422		return (NULL);
423	}
424	do {
425		refcnt = vm->vm_refcnt;
426		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
427			PROC_VMSPACE_UNLOCK(p);
428			return (NULL);
429		}
430	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
431	if (vm != p->p_vmspace) {
432		PROC_VMSPACE_UNLOCK(p);
433		vmspace_free(vm);
434		return (NULL);
435	}
436	PROC_VMSPACE_UNLOCK(p);
437	return (vm);
438}
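/*
 * Sketch of the intended pattern (hypothetical caller): pin another
 * process's vmspace before touching its map, and pair the reference
 * with vmspace_free() when done.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	... examine vm->vm_map under its lock ...
 *	vmspace_free(vm);
 */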
439
440void
441_vm_map_lock(vm_map_t map, const char *file, int line)
442{
443
444	if (map->system_map)
445		mtx_lock_flags_(&map->system_mtx, 0, file, line);
446	else
447		sx_xlock_(&map->lock, file, line);
448	map->timestamp++;
449}
450
451static void
452vm_map_process_deferred(void)
453{
454	struct thread *td;
455	vm_map_entry_t entry, next;
456	vm_object_t object;
457
458	td = curthread;
459	entry = td->td_map_def_user;
460	td->td_map_def_user = NULL;
461	while (entry != NULL) {
462		next = entry->next;
463		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
464			/*
465			 * Decrement the object's writemappings and
466			 * possibly the vnode's v_writecount.
467			 */
468			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
469			    ("Submap with writecount"));
470			object = entry->object.vm_object;
471			KASSERT(object != NULL, ("No object for writecount"));
472			vnode_pager_release_writecount(object, entry->start,
473			    entry->end);
474		}
475		vm_map_entry_deallocate(entry, FALSE);
476		entry = next;
477	}
478}
479
480void
481_vm_map_unlock(vm_map_t map, const char *file, int line)
482{
483
484	if (map->system_map)
485		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
486	else {
487		sx_xunlock_(&map->lock, file, line);
488		vm_map_process_deferred();
489	}
490}
491
492void
493_vm_map_lock_read(vm_map_t map, const char *file, int line)
494{
495
496	if (map->system_map)
497		mtx_lock_flags_(&map->system_mtx, 0, file, line);
498	else
499		sx_slock_(&map->lock, file, line);
500}
501
502void
503_vm_map_unlock_read(vm_map_t map, const char *file, int line)
504{
505
506	if (map->system_map)
507		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
508	else {
509		sx_sunlock_(&map->lock, file, line);
510		vm_map_process_deferred();
511	}
512}
513
514int
515_vm_map_trylock(vm_map_t map, const char *file, int line)
516{
517	int error;
518
519	error = map->system_map ?
520	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
521	    !sx_try_xlock_(&map->lock, file, line);
522	if (error == 0)
523		map->timestamp++;
524	return (error == 0);
525}
526
527int
528_vm_map_trylock_read(vm_map_t map, const char *file, int line)
529{
530	int error;
531
532	error = map->system_map ?
533	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
534	    !sx_try_slock_(&map->lock, file, line);
535	return (error == 0);
536}
537
538/*
539 *	_vm_map_lock_upgrade:	[ internal use only ]
540 *
541 *	Tries to upgrade a read (shared) lock on the specified map to a write
542 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
543 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
544 *	returned without a read or write lock held.
545 *
546 *	Requires that the map be read locked.
547 */
548int
549_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
550{
551	unsigned int last_timestamp;
552
553	if (map->system_map) {
554		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
555	} else {
556		if (!sx_try_upgrade_(&map->lock, file, line)) {
557			last_timestamp = map->timestamp;
558			sx_sunlock_(&map->lock, file, line);
559			vm_map_process_deferred();
560			/*
561			 * If the map's timestamp does not change while the
562			 * map is unlocked, then the upgrade succeeds.
563			 */
564			sx_xlock_(&map->lock, file, line);
565			if (last_timestamp != map->timestamp) {
566				sx_xunlock_(&map->lock, file, line);
567				return (1);
568			}
569		}
570	}
571	map->timestamp++;
572	return (0);
573}
574
575void
576_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
577{
578
579	if (map->system_map) {
580		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
581	} else
582		sx_downgrade_(&map->lock, file, line);
583}
584
585/*
586 *	vm_map_locked:
587 *
588 *	Returns a non-zero value if the caller holds a write (exclusive) lock
589 *	on the specified map and the value "0" otherwise.
590 */
591int
592vm_map_locked(vm_map_t map)
593{
594
595	if (map->system_map)
596		return (mtx_owned(&map->system_mtx));
597	else
598		return (sx_xlocked(&map->lock));
599}
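/*
 * Canonical locking sketch (assumes a caller that may sleep): writers
 * bracket their changes with
 *
 *	vm_map_lock(map);
 *	... modify entries; the lock acquisition bumped map->timestamp ...
 *	vm_map_unlock(map);
 *
 * The read variants pair the same way.  Note that for user maps the
 * unlock paths also run vm_map_process_deferred() above, so deferred
 * entry deallocations happen outside the map lock.
 */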
600
601#ifdef INVARIANTS
602static void
603_vm_map_assert_locked(vm_map_t map, const char *file, int line)
604{
605
606	if (map->system_map)
607		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
608	else
609		sx_assert_(&map->lock, SA_XLOCKED, file, line);
610}
611
612#define	VM_MAP_ASSERT_LOCKED(map) \
613    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
614#else
615#define	VM_MAP_ASSERT_LOCKED(map)
616#endif
617
618/*
619 *	_vm_map_unlock_and_wait:
620 *
621 *	Atomically releases the lock on the specified map and puts the calling
622 *	thread to sleep.  The calling thread will remain asleep until either
623 *	vm_map_wakeup() is performed on the map or the specified timeout is
624 *	exceeded.
625 *
626 *	WARNING!  This function does not perform deferred deallocations of
627 *	objects and map	entries.  Therefore, the calling thread is expected to
628 *	reacquire the map lock after reawakening and later perform an ordinary
629 *	unlock operation, such as vm_map_unlock(), before completing its
630 *	operation on the map.
631 */
632int
633_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
634{
635
636	mtx_lock(&map_sleep_mtx);
637	if (map->system_map)
638		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
639	else
640		sx_xunlock_(&map->lock, file, line);
641	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
642	    timo));
643}
644
645/*
646 *	vm_map_wakeup:
647 *
648 *	Awaken any threads that have slept on the map using
649 *	vm_map_unlock_and_wait().
650 */
651void
652vm_map_wakeup(vm_map_t map)
653{
654
655	/*
656	 * Acquire and release map_sleep_mtx to prevent a wakeup()
657	 * from being performed (and lost) between the map unlock
658	 * and the msleep() in _vm_map_unlock_and_wait().
659	 */
660	mtx_lock(&map_sleep_mtx);
661	mtx_unlock(&map_sleep_mtx);
662	wakeup(&map->root);
663}
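/*
 * Sleep/wakeup pairing, sketched (the waited-for condition is
 * hypothetical):
 *
 *	vm_map_lock(map);
 *	while (!condition(map)) {
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);
 *	}
 *	vm_map_unlock(map);
 *
 * The thread that makes the condition true calls vm_map_wakeup(map)
 * after releasing the map lock.
 */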
664
665void
666vm_map_busy(vm_map_t map)
667{
668
669	VM_MAP_ASSERT_LOCKED(map);
670	map->busy++;
671}
672
673void
674vm_map_unbusy(vm_map_t map)
675{
676
677	VM_MAP_ASSERT_LOCKED(map);
678	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
679	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
680		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
681		wakeup(&map->busy);
682	}
683}
684
685void
686vm_map_wait_busy(vm_map_t map)
687{
688
689	VM_MAP_ASSERT_LOCKED(map);
690	while (map->busy) {
691		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
692		if (map->system_map)
693			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
694		else
695			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
696	}
697	map->timestamp++;
698}
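/*
 * Busy-bracket sketch (hypothetical user): mark the map busy while
 * temporarily dropping the lock, so that a writer in
 * vm_map_wait_busy() stays blocked until the bracket closes.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *	... work that reacquires the map lock internally ...
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */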
699
700long
701vmspace_resident_count(struct vmspace *vmspace)
702{
703	return pmap_resident_count(vmspace_pmap(vmspace));
704}
705
706/*
707 *	vm_map_create:
708 *
709 *	Creates and returns a new empty VM map with
710 *	the given physical map structure, and having
711 *	the given lower and upper address bounds.
712 */
713vm_map_t
714vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
715{
716	vm_map_t result;
717
718	result = uma_zalloc(mapzone, M_WAITOK);
719	CTR1(KTR_VM, "vm_map_create: %p", result);
720	_vm_map_init(result, pmap, min, max);
721	return (result);
722}
723
724/*
725 * Initialize an existing vm_map structure
726 * such as that in the vmspace structure.
727 */
728static void
729_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
730{
731
732	map->header.next = map->header.prev = &map->header;
733	map->needs_wakeup = FALSE;
734	map->system_map = 0;
735	map->pmap = pmap;
736	map->min_offset = min;
737	map->max_offset = max;
738	map->flags = 0;
739	map->root = NULL;
740	map->timestamp = 0;
741	map->busy = 0;
742}
743
744void
745vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
746{
747
748	_vm_map_init(map, pmap, min, max);
749	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
750	sx_init(&map->lock, "user map");
751}
752
753/*
754 *	vm_map_entry_dispose:	[ internal use only ]
755 *
756 *	Inverse of vm_map_entry_create.
757 */
758static void
759vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
760{
761	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
762}
763
764/*
765 *	vm_map_entry_create:	[ internal use only ]
766 *
767 *	Allocates a VM map entry for insertion.
768 *	No entry fields are filled in.
769 */
770static vm_map_entry_t
771vm_map_entry_create(vm_map_t map)
772{
773	vm_map_entry_t new_entry;
774
775	if (map->system_map)
776		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
777	else
778		new_entry = uma_zalloc(mapentzone, M_WAITOK);
779	if (new_entry == NULL)
780		panic("vm_map_entry_create: kernel resources exhausted");
781	return (new_entry);
782}
783
784/*
785 *	vm_map_entry_set_behavior:
786 *
787 *	Set the expected access behavior, either normal, random, or
788 *	sequential.
789 */
790static inline void
791vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
792{
793	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
794	    (behavior & MAP_ENTRY_BEHAV_MASK);
795}
796
797/*
798 *	vm_map_entry_set_max_free:
799 *
800 *	Set the max_free field in a vm_map_entry.
801 */
802static inline void
803vm_map_entry_set_max_free(vm_map_entry_t entry)
804{
805
806	entry->max_free = entry->adj_free;
807	if (entry->left != NULL && entry->left->max_free > entry->max_free)
808		entry->max_free = entry->left->max_free;
809	if (entry->right != NULL && entry->right->max_free > entry->max_free)
810		entry->max_free = entry->right->max_free;
811}
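/*
 * Worked example (illustrative): if entry->adj_free is 4 pages, the
 * left subtree's max_free is 16 pages, and the right subtree's is 8,
 * the recomputed entry->max_free is 16 pages -- the largest gap
 * anywhere in the entry's subtree.  Maintaining this bottom-up is what
 * lets vm_map_findspace() find a hole in one pass down the tree.
 */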
812
813/*
814 *	vm_map_entry_splay:
815 *
816 *	The Sleator and Tarjan top-down splay algorithm with the
817 *	following variation.  Max_free must be computed bottom-up, so
818 *	on the downward pass, maintain the left and right spines in
819 *	reverse order.  Then, make a second pass up each side to fix
820 *	the pointers and compute max_free.  The time bound is O(log n)
821 *	amortized.
822 *
823 *	The new root is the vm_map_entry containing "addr", or else an
824 *	adjacent entry (lower or higher) if addr is not in the tree.
825 *
826 *	The map must be locked, and leaves it so.
827 *
828 *	Returns: the new root.
829 */
830static vm_map_entry_t
831vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
832{
833	vm_map_entry_t llist, rlist;
834	vm_map_entry_t ltree, rtree;
835	vm_map_entry_t y;
836
837	/* Special case of empty tree. */
838	if (root == NULL)
839		return (root);
840
841	/*
842	 * Pass One: Splay down the tree until we find addr or a NULL
843	 * pointer where addr would go.  llist and rlist are the two
844	 * sides in reverse order (bottom-up), with llist linked by
845	 * the right pointer and rlist linked by the left pointer in
846	 * the vm_map_entry.  Wait until Pass Two to set max_free on
847	 * the two spines.
848	 */
849	llist = NULL;
850	rlist = NULL;
851	for (;;) {
852		/* root is never NULL in here. */
853		if (addr < root->start) {
854			y = root->left;
855			if (y == NULL)
856				break;
857			if (addr < y->start && y->left != NULL) {
858				/* Rotate right and put y on rlist. */
859				root->left = y->right;
860				y->right = root;
861				vm_map_entry_set_max_free(root);
862				root = y->left;
863				y->left = rlist;
864				rlist = y;
865			} else {
866				/* Put root on rlist. */
867				root->left = rlist;
868				rlist = root;
869				root = y;
870			}
871		} else if (addr >= root->end) {
872			y = root->right;
873			if (y == NULL)
874				break;
875			if (addr >= y->end && y->right != NULL) {
876				/* Rotate left and put y on llist. */
877				root->right = y->left;
878				y->left = root;
879				vm_map_entry_set_max_free(root);
880				root = y->right;
881				y->right = llist;
882				llist = y;
883			} else {
884				/* Put root on llist. */
885				root->right = llist;
886				llist = root;
887				root = y;
888			}
889		} else
890			break;
891	}
892
893	/*
894	 * Pass Two: Walk back up the two spines, flip the pointers
895	 * and set max_free.  The subtrees of the root go at the
896	 * bottom of llist and rlist.
897	 */
898	ltree = root->left;
899	while (llist != NULL) {
900		y = llist->right;
901		llist->right = ltree;
902		vm_map_entry_set_max_free(llist);
903		ltree = llist;
904		llist = y;
905	}
906	rtree = root->right;
907	while (rlist != NULL) {
908		y = rlist->left;
909		rlist->left = rtree;
910		vm_map_entry_set_max_free(rlist);
911		rtree = rlist;
912		rlist = y;
913	}
914
915	/*
916	 * Final assembly: add ltree and rtree as subtrees of root.
917	 */
918	root->left = ltree;
919	root->right = rtree;
920	vm_map_entry_set_max_free(root);
921
922	return (root);
923}
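/*
 * Callers normally store the result back, e.g. (sketch):
 *
 *	map->root = vm_map_entry_splay(addr, map->root);
 *
 * The link/unlink routines below are the exception: they splay an
 * address known to fall within a particular entry, so that entry is
 * the new root and the return value can be ignored.
 */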
924
925/*
926 *	vm_map_entry_{un,}link:
927 *
928 *	Insert/remove entries from maps.
929 */
930static void
931vm_map_entry_link(vm_map_t map,
932		  vm_map_entry_t after_where,
933		  vm_map_entry_t entry)
934{
935
936	CTR4(KTR_VM,
937	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
938	    map->nentries, entry, after_where);
939	VM_MAP_ASSERT_LOCKED(map);
940	map->nentries++;
941	entry->prev = after_where;
942	entry->next = after_where->next;
943	entry->next->prev = entry;
944	after_where->next = entry;
945
946	if (after_where != &map->header) {
947		if (after_where != map->root)
948			vm_map_entry_splay(after_where->start, map->root);
949		entry->right = after_where->right;
950		entry->left = after_where;
951		after_where->right = NULL;
952		after_where->adj_free = entry->start - after_where->end;
953		vm_map_entry_set_max_free(after_where);
954	} else {
955		entry->right = map->root;
956		entry->left = NULL;
957	}
958	entry->adj_free = (entry->next == &map->header ? map->max_offset :
959	    entry->next->start) - entry->end;
960	vm_map_entry_set_max_free(entry);
961	map->root = entry;
962}
963
964static void
965vm_map_entry_unlink(vm_map_t map,
966		    vm_map_entry_t entry)
967{
968	vm_map_entry_t next, prev, root;
969
970	VM_MAP_ASSERT_LOCKED(map);
971	if (entry != map->root)
972		vm_map_entry_splay(entry->start, map->root);
973	if (entry->left == NULL)
974		root = entry->right;
975	else {
976		root = vm_map_entry_splay(entry->start, entry->left);
977		root->right = entry->right;
978		root->adj_free = (entry->next == &map->header ? map->max_offset :
979		    entry->next->start) - root->end;
980		vm_map_entry_set_max_free(root);
981	}
982	map->root = root;
983
984	prev = entry->prev;
985	next = entry->next;
986	next->prev = prev;
987	prev->next = next;
988	map->nentries--;
989	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
990	    map->nentries, entry);
991}
992
993/*
994 *	vm_map_entry_resize_free:
995 *
996 *	Recompute the amount of free space following a vm_map_entry
997 *	and propagate that value up the tree.  Call this function after
998 *	resizing a map entry in-place, that is, without a call to
999 *	vm_map_entry_link() or _unlink().
1000 *
1001 *	The map must be locked, and leaves it so.
1002 */
1003static void
1004vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1005{
1006
1007	/*
1008	 * Using splay trees without parent pointers, propagating
1009	 * max_free up the tree is done by moving the entry to the
1010	 * root and making the change there.
1011	 */
1012	if (entry != map->root)
1013		map->root = vm_map_entry_splay(entry->start, map->root);
1014
1015	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1016	    entry->next->start) - entry->end;
1017	vm_map_entry_set_max_free(entry);
1018}
1019
1020/*
1021 *	vm_map_lookup_entry:	[ internal use only ]
1022 *
1023 *	Finds the map entry containing (or
1024 *	immediately preceding) the specified address
1025 *	in the given map; the entry is returned
1026 *	in the "entry" parameter.  The boolean
1027 *	result indicates whether the address is
1028 *	actually contained in the map.
1029 */
1030boolean_t
1031vm_map_lookup_entry(
1032	vm_map_t map,
1033	vm_offset_t address,
1034	vm_map_entry_t *entry)	/* OUT */
1035{
1036	vm_map_entry_t cur;
1037	boolean_t locked;
1038
1039	/*
1040	 * If the map is empty, then the map entry immediately preceding
1041	 * "address" is the map's header.
1042	 */
1043	cur = map->root;
1044	if (cur == NULL)
1045		*entry = &map->header;
1046	else if (address >= cur->start && cur->end > address) {
1047		*entry = cur;
1048		return (TRUE);
1049	} else if ((locked = vm_map_locked(map)) ||
1050	    sx_try_upgrade(&map->lock)) {
1051		/*
1052		 * Splay requires a write lock on the map.  However, it only
1053		 * restructures the binary search tree; it does not otherwise
1054		 * change the map.  Thus, the map's timestamp need not change
1055		 * on a temporary upgrade.
1056		 */
1057		map->root = cur = vm_map_entry_splay(address, cur);
1058		if (!locked)
1059			sx_downgrade(&map->lock);
1060
1061		/*
1062		 * If "address" is contained within a map entry, the new root
1063		 * is that map entry.  Otherwise, the new root is a map entry
1064		 * immediately before or after "address".
1065		 */
1066		if (address >= cur->start) {
1067			*entry = cur;
1068			if (cur->end > address)
1069				return (TRUE);
1070		} else
1071			*entry = cur->prev;
1072	} else
1073		/*
1074		 * Since the map is only locked for read access, perform a
1075		 * standard binary search tree lookup for "address".
1076		 */
1077		for (;;) {
1078			if (address < cur->start) {
1079				if (cur->left == NULL) {
1080					*entry = cur->prev;
1081					break;
1082				}
1083				cur = cur->left;
1084			} else if (cur->end > address) {
1085				*entry = cur;
1086				return (TRUE);
1087			} else {
1088				if (cur->right == NULL) {
1089					*entry = cur;
1090					break;
1091				}
1092				cur = cur->right;
1093			}
1094		}
1095	return (FALSE);
1096}
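/*
 * Usage sketch (hypothetical caller): locate the entry covering addr,
 * or learn where a new entry would be linked.
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... entry precedes addr; link a new entry after it ...
 *	}
 */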
1097
1098/*
1099 *	vm_map_insert:
1100 *
1101 *	Inserts the given whole VM object into the target
1102 *	map at the specified address range.  The object's
1103 *	size should match that of the address range.
1104 *
1105 *	Requires that the map be locked, and leaves it so.
1106 *
1107 *	If object is non-NULL, ref count must be bumped by caller
1108 *	prior to making call to account for the new entry.
1109 */
1110int
1111vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1112	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
1113	      int cow)
1114{
1115	vm_map_entry_t new_entry;
1116	vm_map_entry_t prev_entry;
1117	vm_map_entry_t temp_entry;
1118	vm_eflags_t protoeflags;
1119	struct ucred *cred;
1120	vm_inherit_t inheritance;
1121	boolean_t charge_prev_obj;
1122
1123	VM_MAP_ASSERT_LOCKED(map);
1124
1125	/*
1126	 * Check that the start and end points are not bogus.
1127	 */
1128	if ((start < map->min_offset) || (end > map->max_offset) ||
1129	    (start >= end))
1130		return (KERN_INVALID_ADDRESS);
1131
1132	/*
1133	 * Find the entry prior to the proposed starting address; if it's part
1134	 * of an existing entry, this range is bogus.
1135	 */
1136	if (vm_map_lookup_entry(map, start, &temp_entry))
1137		return (KERN_NO_SPACE);
1138
1139	prev_entry = temp_entry;
1140
1141	/*
1142	 * Assert that the next entry doesn't overlap the end point.
1143	 */
1144	if ((prev_entry->next != &map->header) &&
1145	    (prev_entry->next->start < end))
1146		return (KERN_NO_SPACE);
1147
1148	protoeflags = 0;
1149	charge_prev_obj = FALSE;
1150
1151	if (cow & MAP_COPY_ON_WRITE)
1152		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1153
1154	if (cow & MAP_NOFAULT) {
1155		protoeflags |= MAP_ENTRY_NOFAULT;
1156
1157		KASSERT(object == NULL,
1158			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1159	}
1160	if (cow & MAP_DISABLE_SYNCER)
1161		protoeflags |= MAP_ENTRY_NOSYNC;
1162	if (cow & MAP_DISABLE_COREDUMP)
1163		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1164	if (cow & MAP_VN_WRITECOUNT)
1165		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1166	if (cow & MAP_INHERIT_SHARE)
1167		inheritance = VM_INHERIT_SHARE;
1168	else
1169		inheritance = VM_INHERIT_DEFAULT;
1170
1171	cred = NULL;
1172	KASSERT((object != kmem_object && object != kernel_object) ||
1173	    ((object == kmem_object || object == kernel_object) &&
1174		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
1175	    ("kmem or kernel object and cow"));
1176	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1177		goto charged;
1178	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1179	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1180		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1181			return (KERN_RESOURCE_SHORTAGE);
1182		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1183		    object->cred == NULL,
1184		    ("OVERCOMMIT: vm_map_insert o %p", object));
1185		cred = curthread->td_ucred;
1186		crhold(cred);
1187		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
1188			charge_prev_obj = TRUE;
1189	}
1190
1191charged:
1192	/* Expand the kernel pmap, if necessary. */
1193	if (map == kernel_map && end > kernel_vm_end)
1194		pmap_growkernel(end);
1195	if (object != NULL) {
1196		/*
1197		 * OBJ_ONEMAPPING must be cleared unless this mapping
1198		 * is trivially proven to be the only mapping for any
1199		 * of the object's pages.  (Object granularity
1200		 * reference counting is insufficient to recognize
1201		 * aliases with precision.)
1202		 */
1203		VM_OBJECT_WLOCK(object);
1204		if (object->ref_count > 1 || object->shadow_count != 0)
1205			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1206		VM_OBJECT_WUNLOCK(object);
1207	}
1208	else if ((prev_entry != &map->header) &&
1209		 (prev_entry->eflags == protoeflags) &&
1210		 (cow & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) == 0 &&
1211		 (prev_entry->end == start) &&
1212		 (prev_entry->wired_count == 0) &&
1213		 (prev_entry->cred == cred ||
1214		  (prev_entry->object.vm_object != NULL &&
1215		   (prev_entry->object.vm_object->cred == cred))) &&
1216		   vm_object_coalesce(prev_entry->object.vm_object,
1217		       prev_entry->offset,
1218		       (vm_size_t)(prev_entry->end - prev_entry->start),
1219		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
1220		/*
1221		 * We were able to extend the object.  Determine if we
1222		 * can extend the previous map entry to include the
1223		 * new range as well.
1224		 */
1225		if ((prev_entry->inheritance == inheritance) &&
1226		    (prev_entry->protection == prot) &&
1227		    (prev_entry->max_protection == max)) {
1228			map->size += (end - prev_entry->end);
1229			prev_entry->end = end;
1230			vm_map_entry_resize_free(map, prev_entry);
1231			vm_map_simplify_entry(map, prev_entry);
1232			if (cred != NULL)
1233				crfree(cred);
1234			return (KERN_SUCCESS);
1235		}
1236
1237		/*
1238		 * If we can extend the object but cannot extend the
1239		 * map entry, we have to create a new map entry.  We
1240		 * must bump the ref count on the extended object to
1241		 * account for it.  object may be NULL.
1242		 */
1243		object = prev_entry->object.vm_object;
1244		offset = prev_entry->offset +
1245			(prev_entry->end - prev_entry->start);
1246		vm_object_reference(object);
1247		if (cred != NULL && object != NULL && object->cred != NULL &&
1248		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1249			/* Object already accounts for this uid. */
1250			crfree(cred);
1251			cred = NULL;
1252		}
1253	}
1254
1255	/*
1256	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1257	 * in things like the buffer map where we manage kva but do not manage
1258	 * backing objects.
1259	 */
1260
1261	/*
1262	 * Create a new entry
1263	 */
1264	new_entry = vm_map_entry_create(map);
1265	new_entry->start = start;
1266	new_entry->end = end;
1267	new_entry->cred = NULL;
1268
1269	new_entry->eflags = protoeflags;
1270	new_entry->object.vm_object = object;
1271	new_entry->offset = offset;
1272	new_entry->avail_ssize = 0;
1273
1274	new_entry->inheritance = inheritance;
1275	new_entry->protection = prot;
1276	new_entry->max_protection = max;
1277	new_entry->wired_count = 0;
1278	new_entry->wiring_thread = NULL;
1279	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1280	new_entry->next_read = OFF_TO_IDX(offset);
1281
1282	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1283	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1284	new_entry->cred = cred;
1285
1286	/*
1287	 * Insert the new entry into the list
1288	 */
1289	vm_map_entry_link(map, prev_entry, new_entry);
1290	map->size += new_entry->end - new_entry->start;
1291
1292	/*
1293	 * It may be possible to merge the new entry with the next and/or
1294	 * previous entries.  However, due to MAP_STACK_* being a hack, a
1295	 * panic can result from merging such entries.
1296	 */
1297	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
1298		vm_map_simplify_entry(map, new_entry);
1299
1300	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1301		vm_map_pmap_enter(map, start, prot,
1302				    object, OFF_TO_IDX(offset), end - start,
1303				    cow & MAP_PREFAULT_PARTIAL);
1304	}
1305
1306	return (KERN_SUCCESS);
1307}
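/*
 * Direct-caller sketch (hypothetical; most code goes through
 * vm_map_find() or vm_map_fixed() below): the map must be locked and
 * a non-NULL object's reference count bumped beforehand; on failure
 * the caller still owns that reference.
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, 0, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */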
1308
1309/*
1310 *	vm_map_findspace:
1311 *
1312 *	Find the first fit (lowest VM address) for "length" free bytes
1313 *	beginning at address >= start in the given map.
1314 *
1315 *	In a vm_map_entry, "adj_free" is the amount of free space
1316 *	adjacent (higher address) to this entry, and "max_free" is the
1317 *	maximum amount of contiguous free space in its subtree.  This
1318 *	allows finding a free region in one path down the tree, so
1319 *	O(log n) amortized with splay trees.
1320 *
1321 *	The map must be locked, and leaves it so.
1322 *
1323 *	Returns: 0 on success, and starting address in *addr,
1324 *		 1 if insufficient space.
1325 */
1326int
1327vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1328    vm_offset_t *addr)	/* OUT */
1329{
1330	vm_map_entry_t entry;
1331	vm_offset_t st;
1332
1333	/*
1334	 * Request must fit within min/max VM address and must avoid
1335	 * address wrap.
1336	 */
1337	if (start < map->min_offset)
1338		start = map->min_offset;
1339	if (start + length > map->max_offset || start + length < start)
1340		return (1);
1341
1342	/* Empty tree means wide open address space. */
1343	if (map->root == NULL) {
1344		*addr = start;
1345		return (0);
1346	}
1347
1348	/*
1349	 * After splay, if start comes before root node, then there
1350	 * must be a gap from start to the root.
1351	 */
1352	map->root = vm_map_entry_splay(start, map->root);
1353	if (start + length <= map->root->start) {
1354		*addr = start;
1355		return (0);
1356	}
1357
1358	/*
1359	 * Root is the last node that might begin its gap before
1360	 * start, and this is the last comparison where address
1361	 * wrap might be a problem.
1362	 */
1363	st = (start > map->root->end) ? start : map->root->end;
1364	if (length <= map->root->end + map->root->adj_free - st) {
1365		*addr = st;
1366		return (0);
1367	}
1368
1369	/* With max_free, can immediately tell if no solution. */
1370	entry = map->root->right;
1371	if (entry == NULL || length > entry->max_free)
1372		return (1);
1373
1374	/*
1375	 * Search the right subtree in the order: left subtree, root,
1376	 * right subtree (first fit).  The previous splay implies that
1377	 * all regions in the right subtree have addresses > start.
1378	 */
1379	while (entry != NULL) {
1380		if (entry->left != NULL && entry->left->max_free >= length)
1381			entry = entry->left;
1382		else if (entry->adj_free >= length) {
1383			*addr = entry->end;
1384			return (0);
1385		} else
1386			entry = entry->right;
1387	}
1388
1389	/* Can't get here, so panic if we do. */
1390	panic("vm_map_findspace: max_free corrupt");
1391}
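/*
 * Sketch (map locked, hypothetical caller): find the lowest-addressed
 * hole of two pages at or above start.
 *
 *	vm_offset_t addr;
 *
 *	if (vm_map_findspace(map, start, 2 * PAGE_SIZE, &addr) == 0)
 *		... addr is the first fit; the map is still locked ...
 */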
1392
1393int
1394vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1395    vm_offset_t start, vm_size_t length, vm_prot_t prot,
1396    vm_prot_t max, int cow)
1397{
1398	vm_offset_t end;
1399	int result;
1400
1401	end = start + length;
1402	vm_map_lock(map);
1403	VM_MAP_RANGE_CHECK(map, start, end);
1404	(void) vm_map_delete(map, start, end);
1405	result = vm_map_insert(map, object, offset, start, end, prot,
1406	    max, cow);
1407	vm_map_unlock(map);
1408	return (result);
1409}
1410
1411/*
1412 *	vm_map_find finds an unallocated region in the target address
1413 *	map with the given length.  The search is defined to be
1414 *	first-fit from the specified address; the region found is
1415 *	returned in the same parameter.
1416 *
1417 *	If object is non-NULL, ref count must be bumped by caller
1418 *	prior to making call to account for the new entry.
1419 */
1420int
1421vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1422	    vm_offset_t *addr,	/* IN/OUT */
1423	    vm_size_t length, vm_offset_t max_addr, int find_space,
1424	    vm_prot_t prot, vm_prot_t max, int cow)
1425{
1426	vm_offset_t alignment, initial_addr, start;
1427	int result;
1428
1429	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1430	    (object->flags & OBJ_COLORED) == 0))
1431		find_space = VMFS_ANY_SPACE;
1432	if (find_space >> 8 != 0) {
1433		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1434		alignment = (vm_offset_t)1 << (find_space >> 8);
1435	} else
1436		alignment = 0;
1437	initial_addr = *addr;
1438again:
1439	start = initial_addr;
1440	vm_map_lock(map);
1441	do {
1442		if (find_space != VMFS_NO_SPACE) {
1443			if (vm_map_findspace(map, start, length, addr) ||
1444			    (max_addr != 0 && *addr + length > max_addr)) {
1445				vm_map_unlock(map);
1446				if (find_space == VMFS_OPTIMAL_SPACE) {
1447					find_space = VMFS_ANY_SPACE;
1448					goto again;
1449				}
1450				return (KERN_NO_SPACE);
1451			}
1452			switch (find_space) {
1453			case VMFS_SUPER_SPACE:
1454			case VMFS_OPTIMAL_SPACE:
1455				pmap_align_superpage(object, offset, addr,
1456				    length);
1457				break;
1458			case VMFS_ANY_SPACE:
1459				break;
1460			default:
1461				if ((*addr & (alignment - 1)) != 0) {
1462					*addr &= ~(alignment - 1);
1463					*addr += alignment;
1464				}
1465				break;
1466			}
1467
1468			start = *addr;
1469		}
1470		result = vm_map_insert(map, object, offset, start, start +
1471		    length, prot, max, cow);
1472	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1473	    find_space != VMFS_ANY_SPACE);
1474	vm_map_unlock(map);
1475	return (result);
1476}
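/*
 * Typical anonymous-mapping sketch (hypothetical caller): let the
 * kernel choose any address for an anonymous range of the given size.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *
 * On success the chosen address is returned in addr and the mapping
 * is in place; KERN_NO_SPACE is returned when no hole fits.
 */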
1477
1478/*
1479 *	vm_map_simplify_entry:
1480 *
1481 *	Simplify the given map entry by merging with either neighbor.  This
1482 *	routine also has the ability to merge with both neighbors.
1483 *
1484 *	The map must be locked.
1485 *
 1486 *	This routine guarantees that the passed entry remains valid (though
1487 *	possibly extended).  When merging, this routine may delete one or
1488 *	both neighbors.
1489 */
1490void
1491vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1492{
1493	vm_map_entry_t next, prev;
1494	vm_size_t prevsize, esize;
1495
1496	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1497		return;
1498
1499	prev = entry->prev;
1500	if (prev != &map->header) {
1501		prevsize = prev->end - prev->start;
1502		if ( (prev->end == entry->start) &&
1503		     (prev->object.vm_object == entry->object.vm_object) &&
1504		     (!prev->object.vm_object ||
1505			(prev->offset + prevsize == entry->offset)) &&
1506		     (prev->eflags == entry->eflags) &&
1507		     (prev->protection == entry->protection) &&
1508		     (prev->max_protection == entry->max_protection) &&
1509		     (prev->inheritance == entry->inheritance) &&
1510		     (prev->wired_count == entry->wired_count) &&
1511		     (prev->cred == entry->cred)) {
1512			vm_map_entry_unlink(map, prev);
1513			entry->start = prev->start;
1514			entry->offset = prev->offset;
1515			if (entry->prev != &map->header)
1516				vm_map_entry_resize_free(map, entry->prev);
1517
1518			/*
1519			 * If the backing object is a vnode object,
1520			 * vm_object_deallocate() calls vrele().
1521			 * However, vrele() does not lock the vnode
1522			 * because the vnode has additional
1523			 * references.  Thus, the map lock can be kept
1524			 * without causing a lock-order reversal with
1525			 * the vnode lock.
1526			 *
1527			 * Since we count the number of virtual page
1528			 * mappings in object->un_pager.vnp.writemappings,
1529			 * the writemappings value should not be adjusted
1530			 * when the entry is disposed of.
1531			 */
1532			if (prev->object.vm_object)
1533				vm_object_deallocate(prev->object.vm_object);
1534			if (prev->cred != NULL)
1535				crfree(prev->cred);
1536			vm_map_entry_dispose(map, prev);
1537		}
1538	}
1539
1540	next = entry->next;
1541	if (next != &map->header) {
1542		esize = entry->end - entry->start;
1543		if ((entry->end == next->start) &&
1544		    (next->object.vm_object == entry->object.vm_object) &&
1545		     (!entry->object.vm_object ||
1546			(entry->offset + esize == next->offset)) &&
1547		    (next->eflags == entry->eflags) &&
1548		    (next->protection == entry->protection) &&
1549		    (next->max_protection == entry->max_protection) &&
1550		    (next->inheritance == entry->inheritance) &&
1551		    (next->wired_count == entry->wired_count) &&
1552		    (next->cred == entry->cred)) {
1553			vm_map_entry_unlink(map, next);
1554			entry->end = next->end;
1555			vm_map_entry_resize_free(map, entry);
1556
1557			/*
1558			 * See comment above.
1559			 */
1560			if (next->object.vm_object)
1561				vm_object_deallocate(next->object.vm_object);
1562			if (next->cred != NULL)
1563				crfree(next->cred);
1564			vm_map_entry_dispose(map, next);
1565		}
1566	}
1567}
1568/*
1569 *	vm_map_clip_start:	[ internal use only ]
1570 *
1571 *	Asserts that the given entry begins at or after
1572 *	the specified address; if necessary,
1573 *	it splits the entry into two.
1574 */
1575#define vm_map_clip_start(map, entry, startaddr) \
1576{ \
1577	if (startaddr > entry->start) \
1578		_vm_map_clip_start(map, entry, startaddr); \
1579}
1580
1581/*
1582 *	This routine is called only when it is known that
1583 *	the entry must be split.
1584 */
1585static void
1586_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1587{
1588	vm_map_entry_t new_entry;
1589
1590	VM_MAP_ASSERT_LOCKED(map);
1591
1592	/*
1593	 * Split off the front portion -- note that we must insert the new
1594	 * entry BEFORE this one, so that this entry has the specified
1595	 * starting address.
1596	 */
1597	vm_map_simplify_entry(map, entry);
1598
1599	/*
1600	 * If there is no object backing this entry, we might as well create
1601	 * one now.  If we defer it, an object can get created after the map
1602	 * is clipped, and individual objects will be created for the split-up
1603	 * map.  This is a bit of a hack, but is also about the best place to
1604	 * put this improvement.
1605	 */
1606	if (entry->object.vm_object == NULL && !map->system_map) {
1607		vm_object_t object;
1608		object = vm_object_allocate(OBJT_DEFAULT,
1609				atop(entry->end - entry->start));
1610		entry->object.vm_object = object;
1611		entry->offset = 0;
1612		if (entry->cred != NULL) {
1613			object->cred = entry->cred;
1614			object->charge = entry->end - entry->start;
1615			entry->cred = NULL;
1616		}
1617	} else if (entry->object.vm_object != NULL &&
1618		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1619		   entry->cred != NULL) {
1620		VM_OBJECT_WLOCK(entry->object.vm_object);
1621		KASSERT(entry->object.vm_object->cred == NULL,
1622		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1623		entry->object.vm_object->cred = entry->cred;
1624		entry->object.vm_object->charge = entry->end - entry->start;
1625		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1626		entry->cred = NULL;
1627	}
1628
1629	new_entry = vm_map_entry_create(map);
1630	*new_entry = *entry;
1631
1632	new_entry->end = start;
1633	entry->offset += (start - entry->start);
1634	entry->start = start;
1635	if (new_entry->cred != NULL)
1636		crhold(entry->cred);
1637
1638	vm_map_entry_link(map, entry->prev, new_entry);
1639
1640	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1641		vm_object_reference(new_entry->object.vm_object);
1642		/*
1643		 * The object->un_pager.vnp.writemappings for the
1644		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1645		 * kept as is here.  The virtual pages are
1646		 * re-distributed among the clipped entries, so the sum is
1647		 * left the same.
1648		 */
1649	}
1650}
1651
1652/*
1653 *	vm_map_clip_end:	[ internal use only ]
1654 *
1655 *	Asserts that the given entry ends at or before
1656 *	the specified address; if necessary,
1657 *	it splits the entry into two.
1658 */
1659#define vm_map_clip_end(map, entry, endaddr) \
1660{ \
1661	if ((endaddr) < (entry->end)) \
1662		_vm_map_clip_end((map), (entry), (endaddr)); \
1663}
1664
1665/*
1666 *	This routine is called only when it is known that
1667 *	the entry must be split.
1668 */
1669static void
1670_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1671{
1672	vm_map_entry_t new_entry;
1673
1674	VM_MAP_ASSERT_LOCKED(map);
1675
1676	/*
1677	 * If there is no object backing this entry, we might as well create
1678	 * one now.  If we defer it, an object can get created after the map
1679	 * is clipped, and individual objects will be created for the split-up
1680	 * map.  This is a bit of a hack, but is also about the best place to
1681	 * put this improvement.
1682	 */
1683	if (entry->object.vm_object == NULL && !map->system_map) {
1684		vm_object_t object;
1685		object = vm_object_allocate(OBJT_DEFAULT,
1686				atop(entry->end - entry->start));
1687		entry->object.vm_object = object;
1688		entry->offset = 0;
1689		if (entry->cred != NULL) {
1690			object->cred = entry->cred;
1691			object->charge = entry->end - entry->start;
1692			entry->cred = NULL;
1693		}
1694	} else if (entry->object.vm_object != NULL &&
1695		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1696		   entry->cred != NULL) {
1697		VM_OBJECT_WLOCK(entry->object.vm_object);
1698		KASSERT(entry->object.vm_object->cred == NULL,
1699		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1700		entry->object.vm_object->cred = entry->cred;
1701		entry->object.vm_object->charge = entry->end - entry->start;
1702		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1703		entry->cred = NULL;
1704	}
1705
1706	/*
1707	 * Create a new entry and insert it AFTER the specified entry
1708	 */
1709	new_entry = vm_map_entry_create(map);
1710	*new_entry = *entry;
1711
1712	new_entry->start = entry->end = end;
1713	new_entry->offset += (end - entry->start);
1714	if (new_entry->cred != NULL)
1715		crhold(entry->cred);
1716
1717	vm_map_entry_link(map, entry, new_entry);
1718
1719	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1720		vm_object_reference(new_entry->object.vm_object);
1721	}
1722}
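/*
 * The canonical clip pattern used by the range operations below
 * (sketch, map locked):
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	for (; entry != &map->header && entry->start < end;
 *	    entry = entry->next)
 *		vm_map_clip_end(map, entry, end);
 */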
1723
1724/*
1725 *	vm_map_submap:		[ kernel use only ]
1726 *
1727 *	Mark the given range as handled by a subordinate map.
1728 *
1729 *	This range must have been created with vm_map_find,
1730 *	and no other operations may have been performed on this
1731 *	range prior to calling vm_map_submap.
1732 *
1733 *	Only a limited number of operations can be performed
 1734 *	within this range after calling vm_map_submap:
1735 *		vm_fault
1736 *	[Don't try vm_map_copy!]
1737 *
1738 *	To remove a submapping, one must first remove the
1739 *	range from the superior map, and then destroy the
1740 *	submap (if desired).  [Better yet, don't try it.]
1741 */
1742int
1743vm_map_submap(
1744	vm_map_t map,
1745	vm_offset_t start,
1746	vm_offset_t end,
1747	vm_map_t submap)
1748{
1749	vm_map_entry_t entry;
1750	int result = KERN_INVALID_ARGUMENT;
1751
1752	vm_map_lock(map);
1753
1754	VM_MAP_RANGE_CHECK(map, start, end);
1755
1756	if (vm_map_lookup_entry(map, start, &entry)) {
1757		vm_map_clip_start(map, entry, start);
1758	} else
1759		entry = entry->next;
1760
1761	vm_map_clip_end(map, entry, end);
1762
1763	if ((entry->start == start) && (entry->end == end) &&
1764	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1765	    (entry->object.vm_object == NULL)) {
1766		entry->object.sub_map = submap;
1767		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1768		result = KERN_SUCCESS;
1769	}
1770	vm_map_unlock(map);
1771
1772	return (result);
1773}
1774
1775/*
1776 * The maximum number of pages to map
1777 */
1778#define	MAX_INIT_PT	96
1779
1780/*
1781 *	vm_map_pmap_enter:
1782 *
1783 *	Preload read-only mappings for the specified object's resident pages
1784 *	into the target map.  If "flags" is MAP_PREFAULT_PARTIAL, then only
1785 *	the resident pages within the address range [addr, addr + ulmin(size,
1786 *	ptoa(MAX_INIT_PT))) are mapped.  Otherwise, all resident pages within
1787 *	the specified address range are mapped.  This eliminates many soft
1788 *	faults on process startup and immediately after an mmap(2).  Because
1789 *	these are speculative mappings, cached pages are not reactivated and
1790 *	mapped.
1791 */
1792void
1793vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1794    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1795{
1796	vm_offset_t start;
1797	vm_page_t p, p_start;
1798	vm_pindex_t psize, tmpidx;
1799
1800	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1801		return;
1802	VM_OBJECT_RLOCK(object);
1803	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1804		VM_OBJECT_RUNLOCK(object);
1805		VM_OBJECT_WLOCK(object);
1806		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1807			pmap_object_init_pt(map->pmap, addr, object, pindex,
1808			    size);
1809			VM_OBJECT_WUNLOCK(object);
1810			return;
1811		}
1812		VM_OBJECT_LOCK_DOWNGRADE(object);
1813	}
1814
1815	psize = atop(size);
1816	if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
1817		psize = MAX_INIT_PT;
1818	if (psize + pindex > object->size) {
1819		if (object->size < pindex) {
1820			VM_OBJECT_RUNLOCK(object);
1821			return;
1822		}
1823		psize = object->size - pindex;
1824	}
1825
1826	start = 0;
1827	p_start = NULL;
1828
1829	p = vm_page_find_least(object, pindex);
1830	/*
1831	 * Assert: the variable p is either (1) the page with the
1832	 * least pindex greater than or equal to the parameter pindex
1833	 * or (2) NULL.
1834	 */
1835	for (;
1836	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1837	     p = TAILQ_NEXT(p, listq)) {
1838		/*
1839		 * don't allow an madvise to blow away our really
1840		 * free pages allocating pv entries.
1841		 */
1842		if ((flags & MAP_PREFAULT_MADVISE) &&
1843		    cnt.v_free_count < cnt.v_free_reserved) {
1844			psize = tmpidx;
1845			break;
1846		}
1847		if (p->valid == VM_PAGE_BITS_ALL) {
1848			if (p_start == NULL) {
1849				start = addr + ptoa(tmpidx);
1850				p_start = p;
1851			}
1852		} else if (p_start != NULL) {
1853			pmap_enter_object(map->pmap, start, addr +
1854			    ptoa(tmpidx), p_start, prot);
1855			p_start = NULL;
1856		}
1857	}
1858	if (p_start != NULL)
1859		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1860		    p_start, prot);
1861	VM_OBJECT_RUNLOCK(object);
1862}
1863
1864/*
1865 *	vm_map_protect:
1866 *
1867 *	Sets the protection of the specified address
1868 *	region in the target map.  If "set_max" is
1869 *	specified, the maximum protection is to be set;
1870 *	otherwise, only the current protection is affected.
1871 */
1872int
1873vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1874	       vm_prot_t new_prot, boolean_t set_max)
1875{
1876	vm_map_entry_t current, entry;
1877	vm_object_t obj;
1878	struct ucred *cred;
1879	vm_prot_t old_prot;
1880
1881	if (start == end)
1882		return (KERN_SUCCESS);
1883
1884	vm_map_lock(map);
1885
1886	VM_MAP_RANGE_CHECK(map, start, end);
1887
1888	if (vm_map_lookup_entry(map, start, &entry)) {
1889		vm_map_clip_start(map, entry, start);
1890	} else {
1891		entry = entry->next;
1892	}
1893
1894	/*
1895	 * Make a first pass to check for protection violations.
1896	 */
1897	current = entry;
1898	while ((current != &map->header) && (current->start < end)) {
1899		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1900			vm_map_unlock(map);
1901			return (KERN_INVALID_ARGUMENT);
1902		}
1903		if ((new_prot & current->max_protection) != new_prot) {
1904			vm_map_unlock(map);
1905			return (KERN_PROTECTION_FAILURE);
1906		}
1907		current = current->next;
1908	}
1909
1910
1911	/*
1912	 * Do an accounting pass for private read-only mappings that
1913	 * will now do COW due to the newly allowed write access (e.g.,
1914	 * a debugger setting a breakpoint on a text segment).
1915	 */
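	/*
	 * Added note: swap space is reserved up front here because, once
	 * write access is granted, each private page may later need a
	 * copy-on-write duplicate; failing the reservation now lets the
	 * whole request fail cleanly with KERN_RESOURCE_SHORTAGE.
	 */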
1916	for (current = entry; (current != &map->header) &&
1917	     (current->start < end); current = current->next) {
1918
1919		vm_map_clip_end(map, current, end);
1920
1921		if (set_max ||
1922		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1923		    ENTRY_CHARGED(current)) {
1924			continue;
1925		}
1926
1927		cred = curthread->td_ucred;
1928		obj = current->object.vm_object;
1929
1930		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1931			if (!swap_reserve(current->end - current->start)) {
1932				vm_map_unlock(map);
1933				return (KERN_RESOURCE_SHORTAGE);
1934			}
1935			crhold(cred);
1936			current->cred = cred;
1937			continue;
1938		}
1939
1940		VM_OBJECT_WLOCK(obj);
1941		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1942			VM_OBJECT_WUNLOCK(obj);
1943			continue;
1944		}
1945
1946		/*
1947		 * Charge for the whole object allocation now, since
1948		 * we cannot distinguish between non-charged and
1949		 * charged clipped mapping of the same object later.
1950		 */
1951		KASSERT(obj->charge == 0,
1952		    ("vm_map_protect: object %p overcharged (entry %p)",
1953		    obj, current));
1954		if (!swap_reserve(ptoa(obj->size))) {
1955			VM_OBJECT_WUNLOCK(obj);
1956			vm_map_unlock(map);
1957			return (KERN_RESOURCE_SHORTAGE);
1958		}
1959
1960		crhold(cred);
1961		obj->cred = cred;
1962		obj->charge = ptoa(obj->size);
1963		VM_OBJECT_WUNLOCK(obj);
1964	}
1965
1966	/*
1967	 * Go back and fix up protections. [Note that clipping is not
1968	 * necessary the second time.]
1969	 */
1970	current = entry;
1971	while ((current != &map->header) && (current->start < end)) {
1972		old_prot = current->protection;
1973
1974		if (set_max)
1975			current->protection =
1976			    (current->max_protection = new_prot) &
1977			    old_prot;
1978		else
1979			current->protection = new_prot;
1980
1981		/*
1982		 * For user wired map entries, the normal lazy evaluation of
1983		 * write access upgrades through soft page faults is
1984		 * undesirable.  Instead, immediately copy any pages that are
1985		 * copy-on-write and enable write access in the physical map.
1986		 */
1987		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
1988		    (current->protection & VM_PROT_WRITE) != 0 &&
1989		    (old_prot & VM_PROT_WRITE) == 0)
1990			vm_fault_copy_entry(map, map, current, current, NULL);
1991
1992		/*
1993		 * When restricting access, update the physical map.  Worry
1994		 * about copy-on-write here.
1995		 */
1996		if ((old_prot & ~current->protection) != 0) {
1997#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1998							VM_PROT_ALL)
1999			pmap_protect(map->pmap, current->start,
2000			    current->end,
2001			    current->protection & MASK(current));
2002#undef	MASK
2003		}
2004		vm_map_simplify_entry(map, current);
2005		current = current->next;
2006	}
2007	vm_map_unlock(map);
2008	return (KERN_SUCCESS);
2009}
2010
2011/*
2012 *	vm_map_madvise:
2013 *
2014 *	This routine traverses a process's map, handling the madvise
2015 *	system call.  Advisories are classified as either those affecting
2016 *	the vm_map_entry structure or those affecting the underlying
2017 *	objects.
2018 */
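/*
 * Summary (added annotation, mirroring the switch below):
 *
 *	exclusive map lock (entry advice): MADV_NORMAL, MADV_SEQUENTIAL,
 *	    MADV_RANDOM, MADV_NOSYNC, MADV_AUTOSYNC, MADV_NOCORE, MADV_CORE
 *	read lock (object advice): MADV_WILLNEED, MADV_DONTNEED, MADV_FREE
 */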
2019int
2020vm_map_madvise(
2021	vm_map_t map,
2022	vm_offset_t start,
2023	vm_offset_t end,
2024	int behav)
2025{
2026	vm_map_entry_t current, entry;
2027	int modify_map = 0;
2028
2029	/*
2030	 * Some madvise calls directly modify the vm_map_entry, in which case
2031	 * we need to use an exclusive lock on the map and we need to perform
2032	 * various clipping operations.  Otherwise we only need a read-lock
2033	 * on the map.
2034	 */
2035	switch (behav) {
2036	case MADV_NORMAL:
2037	case MADV_SEQUENTIAL:
2038	case MADV_RANDOM:
2039	case MADV_NOSYNC:
2040	case MADV_AUTOSYNC:
2041	case MADV_NOCORE:
2042	case MADV_CORE:
2043		if (start == end)
2044			return (KERN_SUCCESS);
2045		modify_map = 1;
2046		vm_map_lock(map);
2047		break;
2048	case MADV_WILLNEED:
2049	case MADV_DONTNEED:
2050	case MADV_FREE:
2051		if (start == end)
2052			return (KERN_SUCCESS);
2053		vm_map_lock_read(map);
2054		break;
2055	default:
2056		return (KERN_INVALID_ARGUMENT);
2057	}
2058
2059	/*
2060	 * Locate starting entry and clip if necessary.
2061	 */
2062	VM_MAP_RANGE_CHECK(map, start, end);
2063
2064	if (vm_map_lookup_entry(map, start, &entry)) {
2065		if (modify_map)
2066			vm_map_clip_start(map, entry, start);
2067	} else {
2068		entry = entry->next;
2069	}
2070
2071	if (modify_map) {
2072		/*
2073		 * madvise behaviors that are implemented in the vm_map_entry.
2074		 *
2075		 * We clip the vm_map_entry so that behavioral changes are
2076		 * limited to the specified address range.
2077		 */
2078		for (current = entry;
2079		     (current != &map->header) && (current->start < end);
2080		     current = current->next
2081		) {
2082			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2083				continue;
2084
2085			vm_map_clip_end(map, current, end);
2086
2087			switch (behav) {
2088			case MADV_NORMAL:
2089				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2090				break;
2091			case MADV_SEQUENTIAL:
2092				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2093				break;
2094			case MADV_RANDOM:
2095				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2096				break;
2097			case MADV_NOSYNC:
2098				current->eflags |= MAP_ENTRY_NOSYNC;
2099				break;
2100			case MADV_AUTOSYNC:
2101				current->eflags &= ~MAP_ENTRY_NOSYNC;
2102				break;
2103			case MADV_NOCORE:
2104				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2105				break;
2106			case MADV_CORE:
2107				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2108				break;
2109			default:
2110				break;
2111			}
2112			vm_map_simplify_entry(map, current);
2113		}
2114		vm_map_unlock(map);
2115	} else {
2116		vm_pindex_t pstart, pend;
2117
2118		/*
2119		 * madvise behaviors that are implemented in the underlying
2120		 * vm_object.
2121		 *
2122		 * Since we don't clip the vm_map_entry, we have to clip
2123		 * the vm_object pindex and count.
2124		 */
2125		for (current = entry;
2126		     (current != &map->header) && (current->start < end);
2127		     current = current->next
2128		) {
2129			vm_offset_t useEnd, useStart;
2130
2131			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2132				continue;
2133
2134			pstart = OFF_TO_IDX(current->offset);
2135			pend = pstart + atop(current->end - current->start);
2136			useStart = current->start;
2137			useEnd = current->end;
2138
2139			if (current->start < start) {
2140				pstart += atop(start - current->start);
2141				useStart = start;
2142			}
2143			if (current->end > end) {
2144				pend -= atop(current->end - end);
2145				useEnd = end;
2146			}
2147
2148			if (pstart >= pend)
2149				continue;
2150
2151			/*
2152			 * Perform the pmap_advise() before clearing
2153			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2154			 * concurrent pmap operation, such as pmap_remove(),
2155			 * could clear a reference in the pmap and set
2156			 * PGA_REFERENCED on the page before the pmap_advise()
2157			 * had completed.  Consequently, the page would appear
2158			 * referenced based upon an old reference that
2159			 * occurred before this pmap_advise() ran.
2160			 */
2161			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2162				pmap_advise(map->pmap, useStart, useEnd,
2163				    behav);
2164
2165			vm_object_madvise(current->object.vm_object, pstart,
2166			    pend, behav);
2167			if (behav == MADV_WILLNEED) {
2168				vm_map_pmap_enter(map,
2169				    useStart,
2170				    current->protection,
2171				    current->object.vm_object,
2172				    pstart,
2173				    ptoa(pend - pstart),
2174				    MAP_PREFAULT_MADVISE
2175				);
2176			}
2177		}
2178		vm_map_unlock_read(map);
2179	}
2180	return (0);
2181}
2182
2183
2184/*
2185 *	vm_map_inherit:
2186 *
2187 *	Sets the inheritance of the specified address
2188 *	range in the target map.  Inheritance
2189 *	affects how the map will be shared with
2190 *	child maps at the time of vmspace_fork.
2191 */
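/*
 * Example (added annotation): minherit(2) resolves to this routine.
 * Marking a range VM_INHERIT_SHARE makes vmspace_fork() share it with
 * the child, VM_INHERIT_COPY gives the child a copy-on-write copy, and
 * VM_INHERIT_NONE leaves the range unmapped in the child.
 */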
2192int
2193vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2194	       vm_inherit_t new_inheritance)
2195{
2196	vm_map_entry_t entry;
2197	vm_map_entry_t temp_entry;
2198
2199	switch (new_inheritance) {
2200	case VM_INHERIT_NONE:
2201	case VM_INHERIT_COPY:
2202	case VM_INHERIT_SHARE:
2203		break;
2204	default:
2205		return (KERN_INVALID_ARGUMENT);
2206	}
2207	if (start == end)
2208		return (KERN_SUCCESS);
2209	vm_map_lock(map);
2210	VM_MAP_RANGE_CHECK(map, start, end);
2211	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2212		entry = temp_entry;
2213		vm_map_clip_start(map, entry, start);
2214	} else
2215		entry = temp_entry->next;
2216	while ((entry != &map->header) && (entry->start < end)) {
2217		vm_map_clip_end(map, entry, end);
2218		entry->inheritance = new_inheritance;
2219		vm_map_simplify_entry(map, entry);
2220		entry = entry->next;
2221	}
2222	vm_map_unlock(map);
2223	return (KERN_SUCCESS);
2224}
2225
2226/*
2227 *	vm_map_unwire:
2228 *
2229 *	Implements both kernel and user unwiring.
2230 */
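/*
 * Added note: this function and vm_map_wire() share a two-pass
 * protocol.  The first pass clips entries and marks them
 * MAP_ENTRY_IN_TRANSITION, sleeping whenever another thread's
 * transition must drain first; the second pass, after "done:",
 * performs the actual bookkeeping, clears the marks, and wakes any
 * waiters.
 */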
2231int
2232vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2233    int flags)
2234{
2235	vm_map_entry_t entry, first_entry, tmp_entry;
2236	vm_offset_t saved_start;
2237	unsigned int last_timestamp;
2238	int rv;
2239	boolean_t need_wakeup, result, user_unwire;
2240
2241	if (start == end)
2242		return (KERN_SUCCESS);
2243	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2244	vm_map_lock(map);
2245	VM_MAP_RANGE_CHECK(map, start, end);
2246	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2247		if (flags & VM_MAP_WIRE_HOLESOK)
2248			first_entry = first_entry->next;
2249		else {
2250			vm_map_unlock(map);
2251			return (KERN_INVALID_ADDRESS);
2252		}
2253	}
2254	last_timestamp = map->timestamp;
2255	entry = first_entry;
2256	while (entry != &map->header && entry->start < end) {
2257		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2258			/*
2259			 * We have not yet clipped the entry.
2260			 */
2261			saved_start = (start >= entry->start) ? start :
2262			    entry->start;
2263			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2264			if (vm_map_unlock_and_wait(map, 0)) {
2265				/*
2266				 * Allow interruption of user unwiring?
2267				 */
2268			}
2269			vm_map_lock(map);
2270			if (last_timestamp+1 != map->timestamp) {
2271				/*
2272				 * Look again for the entry because the map was
2273				 * modified while it was unlocked.
2274				 * Specifically, the entry may have been
2275				 * clipped, merged, or deleted.
2276				 */
2277				if (!vm_map_lookup_entry(map, saved_start,
2278				    &tmp_entry)) {
2279					if (flags & VM_MAP_WIRE_HOLESOK)
2280						tmp_entry = tmp_entry->next;
2281					else {
2282						if (saved_start == start) {
2283							/*
2284							 * first_entry has been deleted.
2285							 */
2286							vm_map_unlock(map);
2287							return (KERN_INVALID_ADDRESS);
2288						}
2289						end = saved_start;
2290						rv = KERN_INVALID_ADDRESS;
2291						goto done;
2292					}
2293				}
2294				if (entry == first_entry)
2295					first_entry = tmp_entry;
2296				else
2297					first_entry = NULL;
2298				entry = tmp_entry;
2299			}
2300			last_timestamp = map->timestamp;
2301			continue;
2302		}
2303		vm_map_clip_start(map, entry, start);
2304		vm_map_clip_end(map, entry, end);
2305		/*
2306		 * Mark the entry in case the map lock is released.  (See
2307		 * above.)
2308		 */
2309		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2310		    entry->wiring_thread == NULL,
2311		    ("owned map entry %p", entry));
2312		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2313		entry->wiring_thread = curthread;
2314		/*
2315		 * Check the map for holes in the specified region.
2316		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2317		 */
2318		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2319		    (entry->end < end && (entry->next == &map->header ||
2320		    entry->next->start > entry->end))) {
2321			end = entry->end;
2322			rv = KERN_INVALID_ADDRESS;
2323			goto done;
2324		}
2325		/*
2326		 * If system unwiring, require that the entry is system wired.
2327		 */
2328		if (!user_unwire &&
2329		    vm_map_entry_system_wired_count(entry) == 0) {
2330			end = entry->end;
2331			rv = KERN_INVALID_ARGUMENT;
2332			goto done;
2333		}
2334		entry = entry->next;
2335	}
2336	rv = KERN_SUCCESS;
2337done:
2338	need_wakeup = FALSE;
2339	if (first_entry == NULL) {
2340		result = vm_map_lookup_entry(map, start, &first_entry);
2341		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2342			first_entry = first_entry->next;
2343		else
2344			KASSERT(result, ("vm_map_unwire: lookup failed"));
2345	}
2346	for (entry = first_entry; entry != &map->header && entry->start < end;
2347	    entry = entry->next) {
2348		/*
2349		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2350		 * space in the unwired region could have been mapped
2351		 * while the map lock was dropped for draining
2352		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2353		 * could be simultaneously wiring this new mapping
2354		 * entry.  Detect these cases and skip any entries
2355		 * marked as in transition by us.
2356		 */
2357		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2358		    entry->wiring_thread != curthread) {
2359			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2360			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2361			continue;
2362		}
2363
2364		if (rv == KERN_SUCCESS && (!user_unwire ||
2365		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2366			if (user_unwire)
2367				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2368			entry->wired_count--;
2369			if (entry->wired_count == 0) {
2370				/*
2371				 * Retain the map lock.
2372				 */
2373				vm_fault_unwire(map, entry->start, entry->end,
2374				    entry->object.vm_object != NULL &&
2375				    (entry->object.vm_object->flags &
2376				    OBJ_FICTITIOUS) != 0);
2377			}
2378		}
2379		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2380		    ("vm_map_unwire: in-transition flag missing %p", entry));
2381		KASSERT(entry->wiring_thread == curthread,
2382		    ("vm_map_unwire: alien wire %p", entry));
2383		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2384		entry->wiring_thread = NULL;
2385		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2386			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2387			need_wakeup = TRUE;
2388		}
2389		vm_map_simplify_entry(map, entry);
2390	}
2391	vm_map_unlock(map);
2392	if (need_wakeup)
2393		vm_map_wakeup(map);
2394	return (rv);
2395}
2396
2397/*
2398 *	vm_map_wire:
2399 *
2400 *	Implements both kernel and user wiring.
2401 */
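/*
 * Added note: a transient wired_count of -1 is used below as a
 * sentinel meaning "wiring this entry failed"; the cleanup pass after
 * "done:" resets it to 0 instead of unwiring.
 */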
2402int
2403vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2404    int flags)
2405{
2406	vm_map_entry_t entry, first_entry, tmp_entry;
2407	vm_offset_t saved_end, saved_start;
2408	unsigned int last_timestamp;
2409	int rv;
2410	boolean_t fictitious, need_wakeup, result, user_wire;
2411	vm_prot_t prot;
2412
2413	if (start == end)
2414		return (KERN_SUCCESS);
2415	prot = 0;
2416	if (flags & VM_MAP_WIRE_WRITE)
2417		prot |= VM_PROT_WRITE;
2418	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2419	vm_map_lock(map);
2420	VM_MAP_RANGE_CHECK(map, start, end);
2421	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2422		if (flags & VM_MAP_WIRE_HOLESOK)
2423			first_entry = first_entry->next;
2424		else {
2425			vm_map_unlock(map);
2426			return (KERN_INVALID_ADDRESS);
2427		}
2428	}
2429	last_timestamp = map->timestamp;
2430	entry = first_entry;
2431	while (entry != &map->header && entry->start < end) {
2432		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2433			/*
2434			 * We have not yet clipped the entry.
2435			 */
2436			saved_start = (start >= entry->start) ? start :
2437			    entry->start;
2438			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2439			if (vm_map_unlock_and_wait(map, 0)) {
2440				/*
2441				 * Allow interruption of user wiring?
2442				 */
2443			}
2444			vm_map_lock(map);
2445			if (last_timestamp + 1 != map->timestamp) {
2446				/*
2447				 * Look again for the entry because the map was
2448				 * modified while it was unlocked.
2449				 * Specifically, the entry may have been
2450				 * clipped, merged, or deleted.
2451				 */
2452				if (!vm_map_lookup_entry(map, saved_start,
2453				    &tmp_entry)) {
2454					if (flags & VM_MAP_WIRE_HOLESOK)
2455						tmp_entry = tmp_entry->next;
2456					else {
2457						if (saved_start == start) {
2458							/*
2459							 * first_entry has been deleted.
2460							 */
2461							vm_map_unlock(map);
2462							return (KERN_INVALID_ADDRESS);
2463						}
2464						end = saved_start;
2465						rv = KERN_INVALID_ADDRESS;
2466						goto done;
2467					}
2468				}
2469				if (entry == first_entry)
2470					first_entry = tmp_entry;
2471				else
2472					first_entry = NULL;
2473				entry = tmp_entry;
2474			}
2475			last_timestamp = map->timestamp;
2476			continue;
2477		}
2478		vm_map_clip_start(map, entry, start);
2479		vm_map_clip_end(map, entry, end);
2480		/*
2481		 * Mark the entry in case the map lock is released.  (See
2482		 * above.)
2483		 */
2484		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2485		    entry->wiring_thread == NULL,
2486		    ("owned map entry %p", entry));
2487		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2488		entry->wiring_thread = curthread;
2489		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2490		    || (entry->protection & prot) != prot) {
2491			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2492			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2493				end = entry->end;
2494				rv = KERN_INVALID_ADDRESS;
2495				goto done;
2496			}
2497			goto next_entry;
2498		}
2499		if (entry->wired_count == 0) {
2500			entry->wired_count++;
2501			saved_start = entry->start;
2502			saved_end = entry->end;
2503			fictitious = entry->object.vm_object != NULL &&
2504			    (entry->object.vm_object->flags &
2505			    OBJ_FICTITIOUS) != 0;
2506			/*
2507			 * Release the map lock, relying on the in-transition
2508			 * mark.  Mark the map busy for fork.
2509			 */
2510			vm_map_busy(map);
2511			vm_map_unlock(map);
2512			rv = vm_fault_wire(map, saved_start, saved_end,
2513			    fictitious);
2514			vm_map_lock(map);
2515			vm_map_unbusy(map);
2516			if (last_timestamp + 1 != map->timestamp) {
2517				/*
2518				 * Look again for the entry because the map was
2519				 * modified while it was unlocked.  The entry
2520				 * may have been clipped, but NOT merged or
2521				 * deleted.
2522				 */
2523				result = vm_map_lookup_entry(map, saved_start,
2524				    &tmp_entry);
2525				KASSERT(result, ("vm_map_wire: lookup failed"));
2526				if (entry == first_entry)
2527					first_entry = tmp_entry;
2528				else
2529					first_entry = NULL;
2530				entry = tmp_entry;
2531				while (entry->end < saved_end) {
2532					if (rv != KERN_SUCCESS) {
2533						KASSERT(entry->wired_count == 1,
2534						    ("vm_map_wire: bad count"));
2535						entry->wired_count = -1;
2536					}
2537					entry = entry->next;
2538				}
2539			}
2540			last_timestamp = map->timestamp;
2541			if (rv != KERN_SUCCESS) {
2542				KASSERT(entry->wired_count == 1,
2543				    ("vm_map_wire: bad count"));
2544				/*
2545				 * Assign an out-of-range value to represent
2546				 * the failure to wire this entry.
2547				 */
2548				entry->wired_count = -1;
2549				end = entry->end;
2550				goto done;
2551			}
2552		} else if (!user_wire ||
2553			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2554			entry->wired_count++;
2555		}
2556		/*
2557		 * Check the map for holes in the specified region.
2558		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2559		 */
2560	next_entry:
2561		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2562		    (entry->end < end && (entry->next == &map->header ||
2563		    entry->next->start > entry->end))) {
2564			end = entry->end;
2565			rv = KERN_INVALID_ADDRESS;
2566			goto done;
2567		}
2568		entry = entry->next;
2569	}
2570	rv = KERN_SUCCESS;
2571done:
2572	need_wakeup = FALSE;
2573	if (first_entry == NULL) {
2574		result = vm_map_lookup_entry(map, start, &first_entry);
2575		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2576			first_entry = first_entry->next;
2577		else
2578			KASSERT(result, ("vm_map_wire: lookup failed"));
2579	}
2580	for (entry = first_entry; entry != &map->header && entry->start < end;
2581	    entry = entry->next) {
2582		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2583			goto next_entry_done;
2584
2585		/*
2586		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2587		 * space in the unwired region could have been mapped
2588		 * while the map lock was dropped for faulting in the
2589		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2590		 * Moreover, another thread could be simultaneously
2591		 * wiring this new mapping entry.  Detect these cases
2592		 * and skip any entries marked as in transition by us.
2593		 */
2594		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2595		    entry->wiring_thread != curthread) {
2596			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2597			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2598			continue;
2599		}
2600
2601		if (rv == KERN_SUCCESS) {
2602			if (user_wire)
2603				entry->eflags |= MAP_ENTRY_USER_WIRED;
2604		} else if (entry->wired_count == -1) {
2605			/*
2606			 * Wiring failed on this entry.  Thus, unwiring is
2607			 * unnecessary.
2608			 */
2609			entry->wired_count = 0;
2610		} else {
2611			if (!user_wire ||
2612			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2613				entry->wired_count--;
2614			if (entry->wired_count == 0) {
2615				/*
2616				 * Retain the map lock.
2617				 */
2618				vm_fault_unwire(map, entry->start, entry->end,
2619				    entry->object.vm_object != NULL &&
2620				    (entry->object.vm_object->flags &
2621				    OBJ_FICTITIOUS) != 0);
2622			}
2623		}
2624	next_entry_done:
2625		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2626		    ("vm_map_wire: in-transition flag missing %p", entry));
2627		KASSERT(entry->wiring_thread == curthread,
2628		    ("vm_map_wire: alien wire %p", entry));
2629		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2630		    MAP_ENTRY_WIRE_SKIPPED);
2631		entry->wiring_thread = NULL;
2632		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2633			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2634			need_wakeup = TRUE;
2635		}
2636		vm_map_simplify_entry(map, entry);
2637	}
2638	vm_map_unlock(map);
2639	if (need_wakeup)
2640		vm_map_wakeup(map);
2641	return (rv);
2642}
2643
2644/*
2645 * vm_map_sync
2646 *
2647 * Push any dirty cached pages in the address range to their pager.
2648 * If syncio is TRUE, dirty pages are written synchronously.
2649 * If invalidate is TRUE, any cached pages are freed as well.
2650 *
2651 * If the size of the region from start to end is zero, we are
2652 * supposed to flush all modified pages within the region containing
2653 * start.  Unfortunately, a region can be split or coalesced with
2654 * neighboring regions, making it difficult to determine what the
2655 * original region was.  Therefore, we approximate this requirement by
2656 * flushing the current region containing start.
2657 *
2658 * Returns an error if any part of the specified range is not mapped.
2659 */
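/*
 * Illustrative mapping from msync(2) (added annotation): a call such
 * as msync(addr, len, MS_SYNC | MS_INVALIDATE) arrives here roughly as
 *
 *	vm_map_sync(map, addr, addr + len, TRUE, TRUE);
 *
 * with syncio requesting synchronous write-back and invalidate
 * requesting that cached pages be freed.
 */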
2660int
2661vm_map_sync(
2662	vm_map_t map,
2663	vm_offset_t start,
2664	vm_offset_t end,
2665	boolean_t syncio,
2666	boolean_t invalidate)
2667{
2668	vm_map_entry_t current;
2669	vm_map_entry_t entry;
2670	vm_size_t size;
2671	vm_object_t object;
2672	vm_ooffset_t offset;
2673	unsigned int last_timestamp;
2674	boolean_t failed;
2675
2676	vm_map_lock_read(map);
2677	VM_MAP_RANGE_CHECK(map, start, end);
2678	if (!vm_map_lookup_entry(map, start, &entry)) {
2679		vm_map_unlock_read(map);
2680		return (KERN_INVALID_ADDRESS);
2681	} else if (start == end) {
2682		start = entry->start;
2683		end = entry->end;
2684	}
2685	/*
2686	 * Make a first pass to check for user-wired memory and holes.
2687	 */
2688	for (current = entry; current != &map->header && current->start < end;
2689	    current = current->next) {
2690		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2691			vm_map_unlock_read(map);
2692			return (KERN_INVALID_ARGUMENT);
2693		}
2694		if (end > current->end &&
2695		    (current->next == &map->header ||
2696			current->end != current->next->start)) {
2697			vm_map_unlock_read(map);
2698			return (KERN_INVALID_ADDRESS);
2699		}
2700	}
2701
2702	if (invalidate)
2703		pmap_remove(map->pmap, start, end);
2704	failed = FALSE;
2705
2706	/*
2707	 * Make a second pass, cleaning/uncaching pages from the indicated
2708	 * objects as we go.
2709	 */
2710	for (current = entry; current != &map->header && current->start < end;) {
2711		offset = current->offset + (start - current->start);
2712		size = (end <= current->end ? end : current->end) - start;
2713		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2714			vm_map_t smap;
2715			vm_map_entry_t tentry;
2716			vm_size_t tsize;
2717
2718			smap = current->object.sub_map;
2719			vm_map_lock_read(smap);
2720			(void) vm_map_lookup_entry(smap, offset, &tentry);
2721			tsize = tentry->end - offset;
2722			if (tsize < size)
2723				size = tsize;
2724			object = tentry->object.vm_object;
2725			offset = tentry->offset + (offset - tentry->start);
2726			vm_map_unlock_read(smap);
2727		} else {
2728			object = current->object.vm_object;
2729		}
2730		vm_object_reference(object);
2731		last_timestamp = map->timestamp;
2732		vm_map_unlock_read(map);
2733		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2734			failed = TRUE;
2735		start += size;
2736		vm_object_deallocate(object);
2737		vm_map_lock_read(map);
2738		if (last_timestamp == map->timestamp ||
2739		    !vm_map_lookup_entry(map, start, &current))
2740			current = current->next;
2741	}
2742
2743	vm_map_unlock_read(map);
2744	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2745}
2746
2747/*
2748 *	vm_map_entry_unwire:	[ internal use only ]
2749 *
2750 *	Make the region specified by this entry pageable.
2751 *
2752 *	The map in question should be locked.
2753 *	[This is the reason for this routine's existence.]
2754 */
2755static void
2756vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2757{
2758	vm_fault_unwire(map, entry->start, entry->end,
2759	    entry->object.vm_object != NULL &&
2760	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
2761	entry->wired_count = 0;
2762}
2763
2764static void
2765vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2766{
2767
2768	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2769		vm_object_deallocate(entry->object.vm_object);
2770	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2771}
2772
2773/*
2774 *	vm_map_entry_delete:	[ internal use only ]
2775 *
2776 *	Deallocate the given entry from the target map.
2777 */
2778static void
2779vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2780{
2781	vm_object_t object;
2782	vm_pindex_t offidxstart, offidxend, count, size1;
2783	vm_ooffset_t size;
2784
2785	vm_map_entry_unlink(map, entry);
2786	object = entry->object.vm_object;
2787	size = entry->end - entry->start;
2788	map->size -= size;
2789
2790	if (entry->cred != NULL) {
2791		swap_release_by_cred(size, entry->cred);
2792		crfree(entry->cred);
2793	}
2794
2795	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2796	    (object != NULL)) {
2797		KASSERT(entry->cred == NULL || object->cred == NULL ||
2798		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2799		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2800		count = OFF_TO_IDX(size);
2801		offidxstart = OFF_TO_IDX(entry->offset);
2802		offidxend = offidxstart + count;
2803		VM_OBJECT_WLOCK(object);
2804		if (object->ref_count != 1 &&
2805		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2806		    object == kernel_object || object == kmem_object)) {
2807			vm_object_collapse(object);
2808
2809			/*
2810			 * The option OBJPR_NOTMAPPED can be passed here
2811			 * because vm_map_delete() already performed
2812			 * pmap_remove() on the only mapping to this range
2813			 * of pages.
2814			 */
2815			vm_object_page_remove(object, offidxstart, offidxend,
2816			    OBJPR_NOTMAPPED);
2817			if (object->type == OBJT_SWAP)
2818				swap_pager_freespace(object, offidxstart, count);
2819			if (offidxend >= object->size &&
2820			    offidxstart < object->size) {
2821				size1 = object->size;
2822				object->size = offidxstart;
2823				if (object->cred != NULL) {
2824					size1 -= object->size;
2825					KASSERT(object->charge >= ptoa(size1),
2826					    ("vm_map_entry_delete: object->charge < 0"));
2827					swap_release_by_cred(ptoa(size1), object->cred);
2828					object->charge -= ptoa(size1);
2829				}
2830			}
2831		}
2832		VM_OBJECT_WUNLOCK(object);
2833	} else
2834		entry->object.vm_object = NULL;
2835	if (map->system_map)
2836		vm_map_entry_deallocate(entry, TRUE);
2837	else {
2838		entry->next = curthread->td_map_def_user;
2839		curthread->td_map_def_user = entry;
2840	}
2841}
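
/*
 * Added note: for user maps vm_map_entry_delete() defers the actual
 * free by chaining the entry onto curthread->td_map_def_user;
 * vm_map_process_deferred() later calls vm_map_entry_deallocate()
 * once the map lock is dropped, because vm_object_deallocate() may
 * sleep or acquire a vnode lock.
 */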
2842
2843/*
2844 *	vm_map_delete:	[ internal use only ]
2845 *
2846 *	Deallocates the given address range from the target
2847 *	map.
2848 */
2849int
2850vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2851{
2852	vm_map_entry_t entry;
2853	vm_map_entry_t first_entry;
2854
2855	VM_MAP_ASSERT_LOCKED(map);
2856	if (start == end)
2857		return (KERN_SUCCESS);
2858
2859	/*
2860	 * Find the start of the region, and clip it
2861	 */
2862	if (!vm_map_lookup_entry(map, start, &first_entry))
2863		entry = first_entry->next;
2864	else {
2865		entry = first_entry;
2866		vm_map_clip_start(map, entry, start);
2867	}
2868
2869	/*
2870	 * Step through all entries in this region
2871	 */
2872	while ((entry != &map->header) && (entry->start < end)) {
2873		vm_map_entry_t next;
2874
2875		/*
2876		 * Wait for wiring or unwiring of an entry to complete.
2877		 * Also wait for any system wirings to disappear on
2878		 * user maps.
2879		 */
2880		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2881		    (vm_map_pmap(map) != kernel_pmap &&
2882		    vm_map_entry_system_wired_count(entry) != 0)) {
2883			unsigned int last_timestamp;
2884			vm_offset_t saved_start;
2885			vm_map_entry_t tmp_entry;
2886
2887			saved_start = entry->start;
2888			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2889			last_timestamp = map->timestamp;
2890			(void) vm_map_unlock_and_wait(map, 0);
2891			vm_map_lock(map);
2892			if (last_timestamp + 1 != map->timestamp) {
2893				/*
2894				 * Look again for the entry because the map was
2895				 * modified while it was unlocked.
2896				 * Specifically, the entry may have been
2897				 * clipped, merged, or deleted.
2898				 */
2899				if (!vm_map_lookup_entry(map, saved_start,
2900							 &tmp_entry))
2901					entry = tmp_entry->next;
2902				else {
2903					entry = tmp_entry;
2904					vm_map_clip_start(map, entry,
2905							  saved_start);
2906				}
2907			}
2908			continue;
2909		}
2910		vm_map_clip_end(map, entry, end);
2911
2912		next = entry->next;
2913
2914		/*
2915		 * Unwire before removing addresses from the pmap; otherwise,
2916		 * unwiring will put the entries back in the pmap.
2917		 */
2918		if (entry->wired_count != 0) {
2919			vm_map_entry_unwire(map, entry);
2920		}
2921
2922		pmap_remove(map->pmap, entry->start, entry->end);
2923
2924		/*
2925		 * Delete the entry only after removing all pmap
2926		 * entries pointing to its pages.  (Otherwise, its
2927		 * page frames may be reallocated, and any modify bits
2928		 * will be set in the wrong object!)
2929		 */
2930		vm_map_entry_delete(map, entry);
2931		entry = next;
2932	}
2933	return (KERN_SUCCESS);
2934}
2935
2936/*
2937 *	vm_map_remove:
2938 *
2939 *	Remove the given address range from the target map.
2940 *	This is the exported form of vm_map_delete.
2941 */
2942int
2943vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2944{
2945	int result;
2946
2947	vm_map_lock(map);
2948	VM_MAP_RANGE_CHECK(map, start, end);
2949	result = vm_map_delete(map, start, end);
2950	vm_map_unlock(map);
2951	return (result);
2952}
2953
2954/*
2955 *	vm_map_check_protection:
2956 *
2957 *	Assert that the target map allows the specified privilege on the
2958 *	entire address region given.  The entire region must be allocated.
2959 *
2960 *	WARNING!  This code does not and should not check whether the
2961 *	contents of the region are accessible.  For example, a smaller file
2962 *	might be mapped into a larger address space.
2963 *
2964 *	NOTE!  This code is also called by munmap().
2965 *
2966 *	The map must be locked.  A read lock is sufficient.
2967 */
2968boolean_t
2969vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2970			vm_prot_t protection)
2971{
2972	vm_map_entry_t entry;
2973	vm_map_entry_t tmp_entry;
2974
2975	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2976		return (FALSE);
2977	entry = tmp_entry;
2978
2979	while (start < end) {
2980		if (entry == &map->header)
2981			return (FALSE);
2982		/*
2983		 * No holes allowed!
2984		 */
2985		if (start < entry->start)
2986			return (FALSE);
2987		/*
2988		 * Check protection associated with entry.
2989		 */
2990		if ((entry->protection & protection) != protection)
2991			return (FALSE);
2992		/* go to next entry */
2993		start = entry->end;
2994		entry = entry->next;
2995	}
2996	return (TRUE);
2997}
2998
2999/*
3000 *	vm_map_copy_entry:
3001 *
3002 *	Copies the contents of the source entry to the destination
3003 *	entry.  The entries *must* be aligned properly.
3004 */
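/*
 * Added summary: for unwired or read-only source entries, both
 * entries end up marked MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY and
 * share the (now write-protected) source object; the first write
 * fault on either side materializes the private copy.  Writable
 * wired entries are instead copied eagerly via vm_fault_copy_entry().
 */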
3005static void
3006vm_map_copy_entry(
3007	vm_map_t src_map,
3008	vm_map_t dst_map,
3009	vm_map_entry_t src_entry,
3010	vm_map_entry_t dst_entry,
3011	vm_ooffset_t *fork_charge)
3012{
3013	vm_object_t src_object;
3014	vm_map_entry_t fake_entry;
3015	vm_offset_t size;
3016	struct ucred *cred;
3017	int charged;
3018
3019	VM_MAP_ASSERT_LOCKED(dst_map);
3020
3021	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3022		return;
3023
3024	if (src_entry->wired_count == 0 ||
3025	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3026		/*
3027		 * If the source entry is marked needs_copy, it is already
3028		 * write-protected.
3029		 */
3030		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3031		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3032			pmap_protect(src_map->pmap,
3033			    src_entry->start,
3034			    src_entry->end,
3035			    src_entry->protection & ~VM_PROT_WRITE);
3036		}
3037
3038		/*
3039		 * Make a copy of the object.
3040		 */
3041		size = src_entry->end - src_entry->start;
3042		if ((src_object = src_entry->object.vm_object) != NULL) {
3043			VM_OBJECT_WLOCK(src_object);
3044			charged = ENTRY_CHARGED(src_entry);
3045			if ((src_object->handle == NULL) &&
3046				(src_object->type == OBJT_DEFAULT ||
3047				 src_object->type == OBJT_SWAP)) {
3048				vm_object_collapse(src_object);
3049				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3050					vm_object_split(src_entry);
3051					src_object = src_entry->object.vm_object;
3052				}
3053			}
3054			vm_object_reference_locked(src_object);
3055			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3056			if (src_entry->cred != NULL &&
3057			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3058				KASSERT(src_object->cred == NULL,
3059				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3060				     src_object));
3061				src_object->cred = src_entry->cred;
3062				src_object->charge = size;
3063			}
3064			VM_OBJECT_WUNLOCK(src_object);
3065			dst_entry->object.vm_object = src_object;
3066			if (charged) {
3067				cred = curthread->td_ucred;
3068				crhold(cred);
3069				dst_entry->cred = cred;
3070				*fork_charge += size;
3071				if (!(src_entry->eflags &
3072				      MAP_ENTRY_NEEDS_COPY)) {
3073					crhold(cred);
3074					src_entry->cred = cred;
3075					*fork_charge += size;
3076				}
3077			}
3078			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3079			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3080			dst_entry->offset = src_entry->offset;
3081			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3082				/*
3083				 * MAP_ENTRY_VN_WRITECNT cannot
3084				 * indicate write reference from
3085				 * src_entry, since the entry is
3086				 * marked as needs copy.  Allocate a
3087				 * fake entry that is used to
3088				 * decrement object->un_pager.vnp.writecount
3089				 * at the appropriate time.  Attach
3090				 * fake_entry to the deferred list.
3091				 */
3092				fake_entry = vm_map_entry_create(dst_map);
3093				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3094				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3095				vm_object_reference(src_object);
3096				fake_entry->object.vm_object = src_object;
3097				fake_entry->start = src_entry->start;
3098				fake_entry->end = src_entry->end;
3099				fake_entry->next = curthread->td_map_def_user;
3100				curthread->td_map_def_user = fake_entry;
3101			}
3102		} else {
3103			dst_entry->object.vm_object = NULL;
3104			dst_entry->offset = 0;
3105			if (src_entry->cred != NULL) {
3106				dst_entry->cred = curthread->td_ucred;
3107				crhold(dst_entry->cred);
3108				*fork_charge += size;
3109			}
3110		}
3111
3112		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3113		    dst_entry->end - dst_entry->start, src_entry->start);
3114	} else {
3115		/*
3116		 * We don't want to make writeable wired pages copy-on-write.
3117		 * Immediately copy these pages into the new map by simulating
3118		 * page faults.  The new pages are pageable.
3119		 */
3120		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3121		    fork_charge);
3122	}
3123}
3124
3125/*
3126 * vmspace_map_entry_forked:
3127 * Update the newly-forked vmspace each time a map entry is inherited
3128 * or copied.  The values for vm_dsize and vm_tsize are approximate
3129 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3130 */
3131static void
3132vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3133    vm_map_entry_t entry)
3134{
3135	vm_size_t entrysize;
3136	vm_offset_t newend;
3137
3138	entrysize = entry->end - entry->start;
3139	vm2->vm_map.size += entrysize;
3140	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3141		vm2->vm_ssize += btoc(entrysize);
3142	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3143	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3144		newend = MIN(entry->end,
3145		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3146		vm2->vm_dsize += btoc(newend - entry->start);
3147	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3148	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3149		newend = MIN(entry->end,
3150		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3151		vm2->vm_tsize += btoc(newend - entry->start);
3152	}
3153}
3154
3155/*
3156 * vmspace_fork:
3157 * Create a new process vmspace structure and vm_map
3158 * based on those of an existing process.  The new map
3159 * is based on the old map, according to the inheritance
3160 * values on the regions in that map.
3161 *
3162 * XXX It might be worth coalescing the entries added to the new vmspace.
3163 *
3164 * The source map must not be locked.
3165 */
3166struct vmspace *
3167vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3168{
3169	struct vmspace *vm2;
3170	vm_map_t new_map, old_map;
3171	vm_map_entry_t new_entry, old_entry;
3172	vm_object_t object;
3173	int locked;
3174
3175	old_map = &vm1->vm_map;
3176	/* Copy immutable fields of vm1 to vm2. */
3177	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3178	if (vm2 == NULL)
3179		return (NULL);
3180	vm2->vm_taddr = vm1->vm_taddr;
3181	vm2->vm_daddr = vm1->vm_daddr;
3182	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3183	vm_map_lock(old_map);
3184	if (old_map->busy)
3185		vm_map_wait_busy(old_map);
3186	new_map = &vm2->vm_map;
3187	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3188	KASSERT(locked, ("vmspace_fork: lock failed"));
3189
3190	old_entry = old_map->header.next;
3191
3192	while (old_entry != &old_map->header) {
3193		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3194			panic("vm_map_fork: encountered a submap");
3195
3196		switch (old_entry->inheritance) {
3197		case VM_INHERIT_NONE:
3198			break;
3199
3200		case VM_INHERIT_SHARE:
3201			/*
3202			 * Clone the entry, creating the shared object if necessary.
3203			 */
3204			object = old_entry->object.vm_object;
3205			if (object == NULL) {
3206				object = vm_object_allocate(OBJT_DEFAULT,
3207					atop(old_entry->end - old_entry->start));
3208				old_entry->object.vm_object = object;
3209				old_entry->offset = 0;
3210				if (old_entry->cred != NULL) {
3211					object->cred = old_entry->cred;
3212					object->charge = old_entry->end -
3213					    old_entry->start;
3214					old_entry->cred = NULL;
3215				}
3216			}
3217
3218			/*
3219			 * Add the reference before calling vm_object_shadow
3220			 * to ensure that a shadow object is created.
3221			 */
3222			vm_object_reference(object);
3223			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3224				vm_object_shadow(&old_entry->object.vm_object,
3225				    &old_entry->offset,
3226				    old_entry->end - old_entry->start);
3227				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3228				/* Transfer the second reference too. */
3229				vm_object_reference(
3230				    old_entry->object.vm_object);
3231
3232				/*
3233				 * As in vm_map_simplify_entry(), the
3234				 * vnode lock will not be acquired in
3235				 * this call to vm_object_deallocate().
3236				 */
3237				vm_object_deallocate(object);
3238				object = old_entry->object.vm_object;
3239			}
3240			VM_OBJECT_WLOCK(object);
3241			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3242			if (old_entry->cred != NULL) {
3243				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3244				object->cred = old_entry->cred;
3245				object->charge = old_entry->end - old_entry->start;
3246				old_entry->cred = NULL;
3247			}
3248
3249			/*
3250			 * Assert the correct state of the vnode
3251			 * v_writecount while the object is locked, to
3252			 * not relock it later for the assertion
3253			 * correctness.
3254			 */
3255			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3256			    object->type == OBJT_VNODE) {
3257				KASSERT(((struct vnode *)object->handle)->
3258				    v_writecount > 0,
3259				    ("vmspace_fork: v_writecount %p", object));
3260				KASSERT(object->un_pager.vnp.writemappings > 0,
3261				    ("vmspace_fork: vnp.writecount %p",
3262				    object));
3263			}
3264			VM_OBJECT_WUNLOCK(object);
3265
3266			/*
3267			 * Clone the entry, referencing the shared object.
3268			 */
3269			new_entry = vm_map_entry_create(new_map);
3270			*new_entry = *old_entry;
3271			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3272			    MAP_ENTRY_IN_TRANSITION);
3273			new_entry->wiring_thread = NULL;
3274			new_entry->wired_count = 0;
3275			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3276				vnode_pager_update_writecount(object,
3277				    new_entry->start, new_entry->end);
3278			}
3279
3280			/*
3281			 * Insert the entry into the new map -- we know we're
3282			 * inserting at the end of the new map.
3283			 */
3284			vm_map_entry_link(new_map, new_map->header.prev,
3285			    new_entry);
3286			vmspace_map_entry_forked(vm1, vm2, new_entry);
3287
3288			/*
3289			 * Update the physical map
3290			 */
3291			pmap_copy(new_map->pmap, old_map->pmap,
3292			    new_entry->start,
3293			    (old_entry->end - old_entry->start),
3294			    old_entry->start);
3295			break;
3296
3297		case VM_INHERIT_COPY:
3298			/*
3299			 * Clone the entry and link into the map.
3300			 */
3301			new_entry = vm_map_entry_create(new_map);
3302			*new_entry = *old_entry;
3303			/*
3304			 * Copied entry is COW over the old object.
3305			 */
3306			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3307			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3308			new_entry->wiring_thread = NULL;
3309			new_entry->wired_count = 0;
3310			new_entry->object.vm_object = NULL;
3311			new_entry->cred = NULL;
3312			vm_map_entry_link(new_map, new_map->header.prev,
3313			    new_entry);
3314			vmspace_map_entry_forked(vm1, vm2, new_entry);
3315			vm_map_copy_entry(old_map, new_map, old_entry,
3316			    new_entry, fork_charge);
3317			break;
3318		}
3319		old_entry = old_entry->next;
3320	}
3321	/*
3322	 * Use inlined vm_map_unlock() to postpone handling the deferred
3323	 * map entries, which cannot be done until both old_map and
3324	 * new_map locks are released.
3325	 */
3326	sx_xunlock(&old_map->lock);
3327	sx_xunlock(&new_map->lock);
3328	vm_map_process_deferred();
3329
3330	return (vm2);
3331}
3332
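/*
 *	vm_map_stack:
 *
 *	(Added annotation.)  Checks that the max_ssize range at addrbos
 *	is free, maps only an initial init_ssize portion of it, and
 *	records the remainder in avail_ssize for vm_map_growstack() to
 *	commit on demand.
 */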
3333int
3334vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3335    vm_prot_t prot, vm_prot_t max, int cow)
3336{
3337	vm_map_entry_t new_entry, prev_entry;
3338	vm_offset_t bot, top;
3339	vm_size_t growsize, init_ssize;
3340	int orient, rv;
3341	rlim_t lmemlim, vmemlim;
3342
3343	/*
3344	 * The stack orientation is piggybacked with the cow argument.
3345	 * Extract it into orient and mask the cow argument so that we
3346	 * don't pass it around further.
3347	 * NOTE: We explicitly allow bi-directional stacks.
3348	 */
3349	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3350	KASSERT(orient != 0, ("No stack grow direction"));
3351
3352	if (addrbos < vm_map_min(map) ||
3353	    addrbos > vm_map_max(map) ||
3354	    addrbos + max_ssize < addrbos)
3355		return (KERN_NO_SPACE);
3356
3357	growsize = sgrowsiz;
3358	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3359
3360	PROC_LOCK(curproc);
3361	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3362	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3363	PROC_UNLOCK(curproc);
3364
3365	vm_map_lock(map);
3366
3367	/* If addr is already mapped, no go */
3368	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3369		vm_map_unlock(map);
3370		return (KERN_NO_SPACE);
3371	}
3372
3373	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3374		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3375			vm_map_unlock(map);
3376			return (KERN_NO_SPACE);
3377		}
3378	}
3379
3380	/* If we would blow our VMEM resource limit, no go */
3381	if (map->size + init_ssize > vmemlim) {
3382		vm_map_unlock(map);
3383		return (KERN_NO_SPACE);
3384	}
3385
3386	/*
3387	 * If we can't accommodate max_ssize in the current mapping, no go.
3388	 * However, we need to be aware that subsequent user mappings might
3389	 * map into the space we have reserved for stack, and currently this
3390	 * space is not protected.
3391	 *
3392	 * Hopefully we will at least detect this condition when we try to
3393	 * grow the stack.
3394	 */
3395	if ((prev_entry->next != &map->header) &&
3396	    (prev_entry->next->start < addrbos + max_ssize)) {
3397		vm_map_unlock(map);
3398		return (KERN_NO_SPACE);
3399	}
3400
3401	/*
3402	 * We initially map a stack of only init_ssize.  We will grow as
3403	 * needed later.  Depending on the orientation of the stack (i.e.
3404	 * the grow direction) we either map at the top of the range, the
3405	 * bottom of the range or in the middle.
3406	 *
3407	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3408	 * and cow to be 0.  Possibly we should eliminate these as input
3409	 * parameters, and just pass these values here in the insert call.
3410	 */
3411	if (orient == MAP_STACK_GROWS_DOWN)
3412		bot = addrbos + max_ssize - init_ssize;
3413	else if (orient == MAP_STACK_GROWS_UP)
3414		bot = addrbos;
3415	else
3416		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3417	top = bot + init_ssize;
3418	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3419
3420	/* Now set the avail_ssize amount. */
3421	if (rv == KERN_SUCCESS) {
3422		if (prev_entry != &map->header)
3423			vm_map_clip_end(map, prev_entry, bot);
3424		new_entry = prev_entry->next;
3425		if (new_entry->end != top || new_entry->start != bot)
3426			panic("Bad entry start/end for new stack entry");
3427
3428		new_entry->avail_ssize = max_ssize - init_ssize;
3429		if (orient & MAP_STACK_GROWS_DOWN)
3430			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3431		if (orient & MAP_STACK_GROWS_UP)
3432			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3433	}
3434
3435	vm_map_unlock(map);
3436	return (rv);
3437}
3438
3439static int stack_guard_page = 0;
3440TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3441SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3442    &stack_guard_page, 0,
3443    "Insert stack guard page ahead of the growable segments.");
3444
3445/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3446 * desired address is already mapped, or if we successfully grow
3447 * the stack.  Also returns KERN_SUCCESS if addr is outside the
3448 * stack range (this is strange, but preserves compatibility with
3449 * the grow function in vm_machdep.c).
3450 */
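/*
 * Added note: the routine runs under a read lock and upgrades to a
 * write lock only when it must modify the map.  A failed
 * vm_map_lock_upgrade() means the lock was dropped, so limits and
 * entries must be re-validated from scratch via "goto Retry".
 */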
3451int
3452vm_map_growstack(struct proc *p, vm_offset_t addr)
3453{
3454	vm_map_entry_t next_entry, prev_entry;
3455	vm_map_entry_t new_entry, stack_entry;
3456	struct vmspace *vm = p->p_vmspace;
3457	vm_map_t map = &vm->vm_map;
3458	vm_offset_t end;
3459	vm_size_t growsize;
3460	size_t grow_amount, max_grow;
3461	rlim_t lmemlim, stacklim, vmemlim;
3462	int is_procstack, rv;
3463	struct ucred *cred;
3464#ifdef notyet
3465	uint64_t limit;
3466#endif
3467#ifdef RACCT
3468	int error;
3469#endif
3470
3471Retry:
3472	PROC_LOCK(p);
3473	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3474	stacklim = lim_cur(p, RLIMIT_STACK);
3475	vmemlim = lim_cur(p, RLIMIT_VMEM);
3476	PROC_UNLOCK(p);
3477
3478	vm_map_lock_read(map);
3479
3480	/* If addr is already in the entry range, no need to grow. */
3481	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3482		vm_map_unlock_read(map);
3483		return (KERN_SUCCESS);
3484	}
3485
3486	next_entry = prev_entry->next;
3487	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3488		/*
3489		 * This entry does not grow upwards. Since the address lies
3490		 * beyond this entry, the next entry (if one exists) has to
3491		 * be a downward growable entry. The entry list header is
3492		 * never a growable entry, so it suffices to check the flags.
3493		 */
3494		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3495			vm_map_unlock_read(map);
3496			return (KERN_SUCCESS);
3497		}
3498		stack_entry = next_entry;
3499	} else {
3500		/*
3501		 * This entry grows upward. If the next entry does not at
3502		 * least grow downwards, this is the entry we need to grow;
3503		 * otherwise we have two possible choices and have to
3504		 * select one.
3505		 */
3506		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3507			/*
3508			 * We have two choices: grow the entry closest to
3509			 * the address to minimize the amount of growth.
3510			 */
3511			if (addr - prev_entry->end <= next_entry->start - addr)
3512				stack_entry = prev_entry;
3513			else
3514				stack_entry = next_entry;
3515		} else
3516			stack_entry = prev_entry;
3517	}
3518
3519	if (stack_entry == next_entry) {
3520		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3521		KASSERT(addr < stack_entry->start, ("foo"));
3522		end = (prev_entry != &map->header) ? prev_entry->end :
3523		    stack_entry->start - stack_entry->avail_ssize;
3524		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3525		max_grow = stack_entry->start - end;
3526	} else {
3527		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3528		KASSERT(addr >= stack_entry->end, ("foo"));
3529		end = (next_entry != &map->header) ? next_entry->start :
3530		    stack_entry->end + stack_entry->avail_ssize;
3531		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3532		max_grow = end - stack_entry->end;
3533	}
3534
3535	if (grow_amount > stack_entry->avail_ssize) {
3536		vm_map_unlock_read(map);
3537		return (KERN_NO_SPACE);
3538	}
3539
3540	/*
3541	 * If there is no longer enough space between the entries, no go;
3542	 * adjust the available space.  Note: this should only happen if the
3543	 * user has mapped into the stack area after the stack was created,
3544	 * and is probably an error.
3545	 *
3546	 * This also effectively destroys any guard page the user might have
3547	 * intended by limiting the stack size.
3548	 */
3549	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3550		if (vm_map_lock_upgrade(map))
3551			goto Retry;
3552
3553		stack_entry->avail_ssize = max_grow;
3554
3555		vm_map_unlock(map);
3556		return (KERN_NO_SPACE);
3557	}
3558
3559	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3560
3561	/*
3562	 * If this is the main process stack, see if we're over the stack
3563	 * limit.
3564	 */
3565	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3566		vm_map_unlock_read(map);
3567		return (KERN_NO_SPACE);
3568	}
3569#ifdef RACCT
3570	PROC_LOCK(p);
3571	if (is_procstack &&
3572	    racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3573		PROC_UNLOCK(p);
3574		vm_map_unlock_read(map);
3575		return (KERN_NO_SPACE);
3576	}
3577	PROC_UNLOCK(p);
3578#endif
3579
3580	/* Round up the grow amount to a multiple of sgrowsiz. */
3581	growsize = sgrowsiz;
3582	grow_amount = roundup(grow_amount, growsize);
3583	if (grow_amount > stack_entry->avail_ssize)
3584		grow_amount = stack_entry->avail_ssize;
3585	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3586		grow_amount = trunc_page((vm_size_t)stacklim) -
3587		    ctob(vm->vm_ssize);
3588	}
3589#ifdef notyet
3590	PROC_LOCK(p);
3591	limit = racct_get_available(p, RACCT_STACK);
3592	PROC_UNLOCK(p);
3593	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3594		grow_amount = limit - ctob(vm->vm_ssize);
3595#endif
3596	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3597		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3598			vm_map_unlock_read(map);
3599			rv = KERN_NO_SPACE;
3600			goto out;
3601		}
3602#ifdef RACCT
3603		PROC_LOCK(p);
3604		if (racct_set(p, RACCT_MEMLOCK,
3605		    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3606			PROC_UNLOCK(p);
3607			vm_map_unlock_read(map);
3608			rv = KERN_NO_SPACE;
3609			goto out;
3610		}
3611		PROC_UNLOCK(p);
3612#endif
3613	}
3614	/* If we would blow our VMEM resource limit, no go */
3615	if (map->size + grow_amount > vmemlim) {
3616		vm_map_unlock_read(map);
3617		rv = KERN_NO_SPACE;
3618		goto out;
3619	}
3620#ifdef RACCT
3621	PROC_LOCK(p);
3622	if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3623		PROC_UNLOCK(p);
3624		vm_map_unlock_read(map);
3625		rv = KERN_NO_SPACE;
3626		goto out;
3627	}
3628	PROC_UNLOCK(p);
3629#endif
3630
3631	if (vm_map_lock_upgrade(map))
3632		goto Retry;
3633
3634	if (stack_entry == next_entry) {
3635		/*
3636		 * Growing downward.
3637		 */
3638		/* Get the preliminary new entry start value */
3639		addr = stack_entry->start - grow_amount;
3640
3641		/*
3642		 * If this puts us into the previous entry, cut back our
3643		 * growth to the available space. Also, see the note above.
3644		 */
3645		if (addr < end) {
3646			stack_entry->avail_ssize = max_grow;
3647			addr = end;
3648			if (stack_guard_page)
3649				addr += PAGE_SIZE;
3650		}
3651
3652		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3653		    next_entry->protection, next_entry->max_protection, 0);
3654
3655		/* Adjust the available stack space by the amount we grew. */
3656		if (rv == KERN_SUCCESS) {
3657			if (prev_entry != &map->header)
3658				vm_map_clip_end(map, prev_entry, addr);
3659			new_entry = prev_entry->next;
3660			KASSERT(new_entry == stack_entry->prev, ("foo"));
3661			KASSERT(new_entry->end == stack_entry->start, ("foo"));
3662			KASSERT(new_entry->start == addr, ("foo"));
3663			grow_amount = new_entry->end - new_entry->start;
3664			new_entry->avail_ssize = stack_entry->avail_ssize -
3665			    grow_amount;
3666			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3667			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3668		}
3669	} else {
3670		/*
3671		 * Growing upward.
3672		 */
3673		addr = stack_entry->end + grow_amount;
3674
3675		/*
3676		 * If this puts us into the next entry, cut back our growth
3677		 * to the available space. Also, see the note above.
3678		 */
3679		if (addr > end) {
3680			stack_entry->avail_ssize = end - stack_entry->end;
3681			addr = end;
3682			if (stack_guard_page)
3683				addr -= PAGE_SIZE;
3684		}
3685
3686		grow_amount = addr - stack_entry->end;
3687		cred = stack_entry->cred;
3688		if (cred == NULL && stack_entry->object.vm_object != NULL)
3689			cred = stack_entry->object.vm_object->cred;
3690		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3691			rv = KERN_NO_SPACE;
3692		/* Grow the underlying object if applicable. */
3693		else if (stack_entry->object.vm_object == NULL ||
3694			 vm_object_coalesce(stack_entry->object.vm_object,
3695			 stack_entry->offset,
3696			 (vm_size_t)(stack_entry->end - stack_entry->start),
3697			 (vm_size_t)grow_amount, cred != NULL)) {
3698			map->size += (addr - stack_entry->end);
3699			/* Update the current entry. */
3700			stack_entry->end = addr;
3701			stack_entry->avail_ssize -= grow_amount;
3702			vm_map_entry_resize_free(map, stack_entry);
3703			rv = KERN_SUCCESS;
3704
3705			if (next_entry != &map->header)
3706				vm_map_clip_start(map, next_entry, addr);
3707		} else
3708			rv = KERN_FAILURE;
3709	}
3710
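	/* vm_ssize is kept in pages (clicks); convert bytes with btoc(). */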
3711	if (rv == KERN_SUCCESS && is_procstack)
3712		vm->vm_ssize += btoc(grow_amount);
3713
3714	vm_map_unlock(map);
3715
3716	/*
3717	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
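	 * The wired range is exactly the span just added: [addr,
	 * stack_entry->start) when growing down, or [addr - grow_amount,
	 * addr) when growing up.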
3718	 */
3719	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3720		vm_map_wire(map,
3721		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3722		    (stack_entry == next_entry) ? stack_entry->start : addr,
3723		    (p->p_flag & P_SYSTEM)
3724		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3725		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3726	}
3727
3728out:
3729#ifdef RACCT
3730	if (rv != KERN_SUCCESS) {
3731		PROC_LOCK(p);
3732		error = racct_set(p, RACCT_VMEM, map->size);
3733		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3734		if (!old_mlock) {
3735			error = racct_set(p, RACCT_MEMLOCK,
3736			    ptoa(pmap_wired_count(map->pmap)));
3737			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3738		}
3739		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3740		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3741		PROC_UNLOCK(p);
3742	}
3743#endif
3744
3745	return (rv);
3746}
3747
3748/*
3749 * Unshare the specified VM space for exec.  If other processes still
3750 * use it, then create a new one.  The new vmspace is empty.
3751 */
3752int
3753vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3754{
3755	struct vmspace *oldvmspace = p->p_vmspace;
3756	struct vmspace *newvmspace;
3757
3758	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3759	    ("vmspace_exec recursed"));
3760	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3761	if (newvmspace == NULL)
3762		return (ENOMEM);
3763	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3764	/*
3765	 * This code is written like this for prototype purposes.  The
3766 * goal is to avoid running down the vmspace here, but to let the
3767 * other processes that are still using the vmspace finally run
3768 * it down.  Even though there is little or no chance of blocking
3769	 * here, it is a good idea to keep this form for future mods.
3770	 */
3771	PROC_VMSPACE_LOCK(p);
3772	p->p_vmspace = newvmspace;
3773	PROC_VMSPACE_UNLOCK(p);
3774	if (p == curthread->td_proc)
3775		pmap_activate(curthread);
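	/*
	 * The old vmspace is not freed here; flag the thread so that the
	 * exec code drops the reference once the switch is committed.
	 */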
3776	curthread->td_pflags |= TDP_EXECVMSPC;
3777	return (0);
3778}
3779
3780/*
3781 * Unshare the specified VM space for forcing COW.  This
3782 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3783 */
3784int
3785vmspace_unshare(struct proc *p)
3786{
3787	struct vmspace *oldvmspace = p->p_vmspace;
3788	struct vmspace *newvmspace;
3789	vm_ooffset_t fork_charge;
3790
3791	if (oldvmspace->vm_refcnt == 1)
3792		return (0);
3793	fork_charge = 0;
3794	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3795	if (newvmspace == NULL)
3796		return (ENOMEM);
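	/*
	 * vmspace_fork() returned, in fork_charge, the swap reservation
	 * needed for the new copy-on-write ranges; take the reservation
	 * against this process's credential or give up.
	 */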
3797	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3798		vmspace_free(newvmspace);
3799		return (ENOMEM);
3800	}
3801	PROC_VMSPACE_LOCK(p);
3802	p->p_vmspace = newvmspace;
3803	PROC_VMSPACE_UNLOCK(p);
3804	if (p == curthread->td_proc)
3805		pmap_activate(curthread);
3806	vmspace_free(oldvmspace);
3807	return (0);
3808}
3809
3810/*
3811 *	vm_map_lookup:
3812 *
3813 *	Finds the VM object, offset, and
3814 *	protection for a given virtual address in the
3815 *	specified map, assuming a page fault of the
3816 *	type specified.
3817 *
3818 *	Leaves the map in question locked for read; return
3819 *	values are guaranteed until a vm_map_lookup_done
3820 *	call is performed.  Note that the map argument
3821 *	is in/out; the returned map must be used in
3822 *	the call to vm_map_lookup_done.
3823 *
3824 *	A handle (out_entry) is returned for use in
3825 *	vm_map_lookup_done, to make that fast.
3826 *
3827 *	If a lookup is requested with "write protection"
3828 *	specified, the map may be changed to perform virtual
3829 *	copying operations, although the data referenced will
3830 *	remain the same.
3831 */
3832int
3833vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3834	      vm_offset_t vaddr,
3835	      vm_prot_t fault_typea,
3836	      vm_map_entry_t *out_entry,	/* OUT */
3837	      vm_object_t *object,		/* OUT */
3838	      vm_pindex_t *pindex,		/* OUT */
3839	      vm_prot_t *out_prot,		/* OUT */
3840	      boolean_t *wired)			/* OUT */
3841{
3842	vm_map_entry_t entry;
3843	vm_map_t map = *var_map;
3844	vm_prot_t prot;
3845	vm_prot_t fault_type = fault_typea;
3846	vm_object_t eobject;
3847	vm_size_t size;
3848	struct ucred *cred;
3849
3850RetryLookup:;
3851
3852	vm_map_lock_read(map);
3853
3854	/*
3855	 * Look up the faulting address.
3856	 */
3857	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3858		vm_map_unlock_read(map);
3859		return (KERN_INVALID_ADDRESS);
3860	}
3861
3862	entry = *out_entry;
3863
3864	/*
3865	 * Handle submaps.
3866	 * Handle submaps: descend into the submap and retry the lookup there.
3867	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3868		vm_map_t old_map = map;
3869
3870		*var_map = map = entry->object.sub_map;
3871		vm_map_unlock_read(old_map);
3872		goto RetryLookup;
3873	}
3874
3875	/*
3876	 * Check whether this task is allowed to have this page.
3877	 */
3878	prot = entry->protection;
3879	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3880	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3881		vm_map_unlock_read(map);
3882		return (KERN_PROTECTION_FAILURE);
3883	}
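	/*
	 * A write fault on a user-wired copy-on-write entry is refused:
	 * performing the copy would replace the pages that were wired.
	 */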
3884	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3885	    (entry->eflags & MAP_ENTRY_COW) &&
3886	    (fault_type & VM_PROT_WRITE)) {
3887		vm_map_unlock_read(map);
3888		return (KERN_PROTECTION_FAILURE);
3889	}
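	/*
	 * A forced copy (VM_PROT_COPY, e.g. a debugger writing through a
	 * read-only mapping) requires that the entry either be writable
	 * at its maximum protection or already be copy-on-write.
	 */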
3890	if ((fault_typea & VM_PROT_COPY) != 0 &&
3891	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
3892	    (entry->eflags & MAP_ENTRY_COW) == 0) {
3893		vm_map_unlock_read(map);
3894		return (KERN_PROTECTION_FAILURE);
3895	}
3896
3897	/*
3898	 * If this page is not pageable, we have to get it for all possible
3899	 * accesses.
3900	 */
3901	*wired = (entry->wired_count != 0);
3902	if (*wired)
3903		fault_type = entry->protection;
3904	size = entry->end - entry->start;
3905	/*
3906	 * If the entry was copy-on-write, we either shadow it or demote permissions.
3907	 */
3908	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3909		/*
3910		 * If we want to write the page, we may as well handle that
3911		 * now since we've got the map locked.
3912		 *
3913		 * If we don't need to write the page, we just demote the
3914		 * permissions allowed.
3915		 */
3916		if ((fault_type & VM_PROT_WRITE) != 0 ||
3917		    (fault_typea & VM_PROT_COPY) != 0) {
3918			/*
3919			 * Make a new object, and place it in the object
3920			 * chain.  Note that no new references have appeared
3921			 * -- one just moved from the map to the new
3922			 * object.
3923			 */
3924			if (vm_map_lock_upgrade(map))
3925				goto RetryLookup;
3926
3927			if (entry->cred == NULL) {
3928				/*
3929				 * Charge the current thread's credential,
3930				 * e.g. a debugger's, for the memory.
3931				 */
3932				cred = curthread->td_ucred;
3933				crhold(cred);
3934				if (!swap_reserve_by_cred(size, cred)) {
3935					crfree(cred);
3936					vm_map_unlock(map);
3937					return (KERN_RESOURCE_SHORTAGE);
3938				}
3939				entry->cred = cred;
3940			}
3941			vm_object_shadow(&entry->object.vm_object,
3942			    &entry->offset, size);
3943			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3944			eobject = entry->object.vm_object;
3945			if (eobject->cred != NULL) {
3946				/*
3947				 * The object was not shadowed.
3948				 */
3949				swap_release_by_cred(size, entry->cred);
3950				crfree(entry->cred);
3951				entry->cred = NULL;
3952			} else if (entry->cred != NULL) {
3953				VM_OBJECT_WLOCK(eobject);
3954				eobject->cred = entry->cred;
3955				eobject->charge = size;
3956				VM_OBJECT_WUNLOCK(eobject);
3957				entry->cred = NULL;
3958			}
3959
3960			vm_map_lock_downgrade(map);
3961		} else {
3962			/*
3963			 * We're attempting to read a copy-on-write page --
3964			 * don't allow writes.
3965			 */
3966			prot &= ~VM_PROT_WRITE;
3967		}
3968	}
3969
3970	/*
3971	 * Create an object if necessary.
3972	 */
3973	if (entry->object.vm_object == NULL &&
3974	    !map->system_map) {
3975		if (vm_map_lock_upgrade(map))
3976			goto RetryLookup;
3977		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3978		    atop(size));
3979		entry->offset = 0;
3980		if (entry->cred != NULL) {
3981			VM_OBJECT_WLOCK(entry->object.vm_object);
3982			entry->object.vm_object->cred = entry->cred;
3983			entry->object.vm_object->charge = size;
3984			VM_OBJECT_WUNLOCK(entry->object.vm_object);
3985			entry->cred = NULL;
3986		}
3987		vm_map_lock_downgrade(map);
3988	}
3989
3990	/*
3991	 * Return the object/offset from this entry.  If the entry was
3992	 * copy-on-write or empty, it has been fixed up.
3993	 */
3994	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3995	*object = entry->object.vm_object;
3996
3997	*out_prot = prot;
3998	return (KERN_SUCCESS);
3999}
4000
4001/*
4002 *	vm_map_lookup_locked:
4003 *
4004 *	Look up the faulting address.  A version of vm_map_lookup that returns
4005 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
4006 */
4007int
4008vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4009		     vm_offset_t vaddr,
4010		     vm_prot_t fault_typea,
4011		     vm_map_entry_t *out_entry,	/* OUT */
4012		     vm_object_t *object,	/* OUT */
4013		     vm_pindex_t *pindex,	/* OUT */
4014		     vm_prot_t *out_prot,	/* OUT */
4015		     boolean_t *wired)		/* OUT */
4016{
4017	vm_map_entry_t entry;
4018	vm_map_t map = *var_map;
4019	vm_prot_t prot;
4020	vm_prot_t fault_type = fault_typea;
4021
4022	/*
4023	 * Look up the faulting address.
4024	 */
4025	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4026		return (KERN_INVALID_ADDRESS);
4027
4028	entry = *out_entry;
4029
4030	/*
4031	 * Fail if the entry refers to a submap.
4032	 */
4033	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4034		return (KERN_FAILURE);
4035
4036	/*
4037	 * Check whether this task is allowed to have this page.
4038	 */
4039	prot = entry->protection;
4040	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4041	if ((fault_type & prot) != fault_type)
4042		return (KERN_PROTECTION_FAILURE);
4043	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4044	    (entry->eflags & MAP_ENTRY_COW) &&
4045	    (fault_type & VM_PROT_WRITE))
4046		return (KERN_PROTECTION_FAILURE);
4047
4048	/*
4049	 * If this page is not pageable, we have to get it for all possible
4050	 * accesses.
4051	 */
4052	*wired = (entry->wired_count != 0);
4053	if (*wired)
4054		fault_type = entry->protection;
4055
4056	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4057		/*
4058		 * Fail if the entry was copy-on-write for a write fault.
4059		 */
4060		if (fault_type & VM_PROT_WRITE)
4061			return (KERN_FAILURE);
4062		/*
4063		 * We're attempting to read a copy-on-write page --
4064		 * don't allow writes.
4065		 */
4066		prot &= ~VM_PROT_WRITE;
4067	}
4068
4069	/*
4070	 * Fail if an object should be created.
4071	 */
4072	if (entry->object.vm_object == NULL && !map->system_map)
4073		return (KERN_FAILURE);
4074
4075	/*
4076	 * Return the object/offset from this entry.  If the entry was
4077	 * copy-on-write or empty, it has been fixed up.
4078	 */
4079	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4080	*object = entry->object.vm_object;
4081
4082	*out_prot = prot;
4083	return (KERN_SUCCESS);
4084}
4085
4086/*
4087 *	vm_map_lookup_done:
4088 *
4089 *	Releases locks acquired by a vm_map_lookup
4090 *	(according to the handle returned by that lookup).
4091 */
4092void
4093vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4094{
4095	/*
4096	 * Unlock the main-level map
4097	 */
4098	vm_map_unlock_read(map);
4099}
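/*
 * Typical lookup usage (a sketch; vm_fault() is the canonical caller,
 * and the identifiers below are illustrative locals):
 *
 *	vm_map_t map = &p->p_vmspace->vm_map;
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	boolean_t wired;
 *
 *	if (vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired) == KERN_SUCCESS) {
 *		... fault the page in from (object, pindex) ...
 *		vm_map_lookup_done(map, entry);
 *	}
 */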
4100
4101#include "opt_ddb.h"
4102#ifdef DDB
4103#include <sys/kernel.h>
4104
4105#include <ddb/ddb.h>
4106
4107static void
4108vm_map_print(vm_map_t map)
4109{
4110	vm_map_entry_t entry;
4111
4112	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4113	    (void *)map,
4114	    (void *)map->pmap, map->nentries, map->timestamp);
4115
4116	db_indent += 2;
4117	for (entry = map->header.next; entry != &map->header;
4118	    entry = entry->next) {
4119		db_iprintf("map entry %p: start=%p, end=%p\n",
4120		    (void *)entry, (void *)entry->start, (void *)entry->end);
4121		{
4122			static char *inheritance_name[4] =
4123			{"share", "copy", "none", "donate_copy"};
4124
4125			db_iprintf(" prot=%x/%x/%s",
4126			    entry->protection,
4127			    entry->max_protection,
4128			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4129			if (entry->wired_count != 0)
4130				db_printf(", wired");
4131		}
4132		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4133			db_printf(", share=%p, offset=0x%jx\n",
4134			    (void *)entry->object.sub_map,
4135			    (uintmax_t)entry->offset);
4136			if ((entry->prev == &map->header) ||
4137			    (entry->prev->object.sub_map !=
4138				entry->object.sub_map)) {
4139				db_indent += 2;
4140				vm_map_print((vm_map_t)entry->object.sub_map);
4141				db_indent -= 2;
4142			}
4143		} else {
4144			if (entry->cred != NULL)
4145				db_printf(", ruid %d", entry->cred->cr_ruid);
4146			db_printf(", object=%p, offset=0x%jx",
4147			    (void *)entry->object.vm_object,
4148			    (uintmax_t)entry->offset);
4149			if (entry->object.vm_object && entry->object.vm_object->cred)
4150				db_printf(", obj ruid %d charge %jx",
4151				    entry->object.vm_object->cred->cr_ruid,
4152				    (uintmax_t)entry->object.vm_object->charge);
4153			if (entry->eflags & MAP_ENTRY_COW)
4154				db_printf(", copy (%s)",
4155				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4156			db_printf("\n");
4157
4158			if ((entry->prev == &map->header) ||
4159			    (entry->prev->object.vm_object !=
4160				entry->object.vm_object)) {
4161				db_indent += 2;
4162				vm_object_print((db_expr_t)(intptr_t)
4163						entry->object.vm_object,
4164						0, 0, (char *)0);
4165				db_indent -= 2;
4166			}
4167		}
4168	}
4169	db_indent -= 2;
4170}
4171
4172DB_SHOW_COMMAND(map, map)
4173{
4174
4175	if (!have_addr) {
4176		db_printf("usage: show map <addr>\n");
4177		return;
4178	}
4179	vm_map_print((vm_map_t)addr);
4180}
4181
4182DB_SHOW_COMMAND(procvm, procvm)
4183{
4184	struct proc *p;
4185
4186	if (have_addr) {
4187		p = (struct proc *) addr;
4188	} else {
4189		p = curproc;
4190	}
4191
4192	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4193	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4194	    (void *)vmspace_pmap(p->p_vmspace));
4195
4196	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4197}
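/*
 * Example ddb usage (the address is illustrative):
 *
 *	db> show procvm
 *	db> show map 0xfffff80002a7c000
 */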
4198
4199#endif /* DDB */
4200