vm_map.c revision 266589
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/vm_map.c 266589 2014-05-23 16:46:50Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}
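
/*
 * Illustrative sketch, not part of the original file: the usual pairing
 * of vmspace_alloc() and vmspace_free().  The EXAMPLE_ONLY guard is
 * hypothetical and keeps this stub out of any real build.
 */
#ifdef EXAMPLE_ONLY
static void
vmspace_alloc_example(void)
{
	struct vmspace *vm;

	/* A NULL pinit selects the default pmap_pinit() (see above). */
	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, NULL);
	if (vm == NULL)
		return;			/* pmap initialization failed */
	/* ... use vm->vm_map ... */
	vmspace_free(vm);		/* drops the refcnt set to 1 above */
}
#endif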

static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can. vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
	vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}
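
/*
 * Illustrative sketch, not part of the original file: the canonical
 * retry pattern around vm_map_lock_upgrade().  On failure the lock was
 * dropped and the map may have changed, so any cached state must be
 * revalidated.  The EXAMPLE_ONLY guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static void
vm_map_upgrade_example(vm_map_t map)
{

	vm_map_lock_read(map);
	/* ... inspect the map under the shared lock ... */
	if (vm_map_lock_upgrade(map) != 0) {
		/* Upgrade failed; no lock is held.  Relock and rescan. */
		vm_map_lock(map);
	}
	/* ... modify the map under the exclusive lock ... */
	vm_map_unlock(map);
}
#endif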

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}
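
/*
 * Illustrative sketch, not part of the original file: pairing
 * vm_map_unlock_and_wait() with vm_map_wakeup().  "done" is a
 * hypothetical condition that another thread sets before calling
 * vm_map_wakeup(map).  Per the warning above, the map lock is
 * reacquired before rechecking, and a final ordinary unlock runs the
 * deferred deallocations.  The EXAMPLE_ONLY guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static void
vm_map_wait_example(vm_map_t map, volatile int *done)
{

	vm_map_lock(map);
	while (!*done) {
		/* Atomically drops the map lock and sleeps. */
		(void)vm_map_unlock_and_wait(map, 0);
		vm_map_lock(map);	/* reacquire before rechecking */
	}
	vm_map_unlock(map);
}
#endif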

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}
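
/*
 * Added note (illustrative, not part of the original file): for an
 * entry E whose adj_free is 4 pages and whose children report max_free
 * of 16 and 8 pages, max_free(E) becomes 16, the largest gap anywhere
 * in E's subtree.  This is the invariant that lets vm_map_findspace()
 * prune an entire subtree with a single comparison.
 */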

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
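
/*
 * Illustrative sketch, not part of the original file: a typical lookup
 * under the read lock.  On a FALSE return, "entry" refers to the
 * immediately preceding entry (or &map->header).  The EXAMPLE_ONLY
 * guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static boolean_t
vm_map_lookup_example(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;
	boolean_t contained;

	vm_map_lock_read(map);
	contained = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return (contained);
}
#endif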

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;
	boolean_t charge_prev_obj;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	charge_prev_obj = FALSE;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	KASSERT((object != kmem_object && object != kernel_object) ||
	    ((object == kmem_object || object == kernel_object) &&
		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
	    ("kmem or kernel object and cow"));
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->cred == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
		crhold(cred);
		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
			charge_prev_obj = TRUE;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (cow & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) == 0 &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 (prev_entry->cred == cred ||
		  (prev_entry->object.vm_object != NULL &&
		   (prev_entry->object.vm_object->cred == cred))) &&
		   vm_object_coalesce(prev_entry->object.vm_object,
		       prev_entry->offset,
		       (vm_size_t)(prev_entry->end - prev_entry->start),
		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == inheritance) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			if (cred != NULL)
				crfree(cred);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			crfree(cred);
			cred = NULL;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = OFF_TO_IDX(offset);

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * It may be possible to merge the new entry with the next and/or
	 * previous entries.  However, due to MAP_STACK_* being a hack, a
	 * panic can result from merging such entries.
	 */
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
		vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}
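
/*
 * Illustrative sketch, not part of the original file: the
 * findspace-then-insert sequence that vm_map_find() below performs,
 * shown here for an anonymous (object-less) range.  The EXAMPLE_ONLY
 * guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static int
vm_map_findspace_example(vm_map_t map, vm_size_t length, vm_offset_t *addr)
{
	int rv;

	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), length, addr) != 0)
		rv = KERN_NO_SPACE;
	else
		rv = vm_map_insert(map, NULL, 0, *addr, *addr + length,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (rv);
}
#endif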

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	(void) vm_map_delete(map, start, end);
	result = vm_map_insert(map, object, offset, start, end, prot,
	    max, cow);
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, vm_offset_t max_addr, int find_space,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, initial_addr, start;
	int result;

	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	initial_addr = *addr;
again:
	start = initial_addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				vm_map_unlock(map);
				if (find_space == VMFS_OPTIMAL_SPACE) {
					find_space = VMFS_ANY_SPACE;
					goto again;
				}
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_SUPER_SPACE:
			case VMFS_OPTIMAL_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
			case VMFS_ANY_SPACE:
				break;
			default:
				if ((*addr & (alignment - 1)) != 0) {
					*addr &= ~(alignment - 1);
					*addr += alignment;
				}
				break;
			}

			start = *addr;
		}
		result = vm_map_insert(map, object, offset, start, start +
		    length, prot, max, cow);
	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
	    find_space != VMFS_ANY_SPACE);
	vm_map_unlock(map);
	return (result);
}
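
/*
 * Illustrative sketch, not part of the original file: a first-fit
 * anonymous allocation through vm_map_find().  "*addr" is an in/out
 * hint: the search starts there and the chosen address is returned in
 * it.  The EXAMPLE_ONLY guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static int
vm_map_find_example(vm_map_t map, vm_size_t size, vm_offset_t *addr)
{

	*addr = vm_map_min(map);	/* search from the map's bottom */
	return (vm_map_find(map, NULL, 0, addr, round_page(size), 0,
	    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif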

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count) &&
		     (prev->cred == entry->cred)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 *
			 * Since we count the number of virtual page
			 * mappings in object->un_pager.vnp.writemappings,
			 * the writemappings value should not be adjusted
			 * when the entry is disposed of.
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->cred != NULL)
				crfree(prev->cred);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->cred == entry->cred)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->cred != NULL)
				crfree(next->cred);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
		   entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
		   entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}
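
/*
 * Illustrative sketch, not part of the original file: carving a submap
 * out of the kernel map, similar in spirit to kmem_suballoc().  The
 * EXAMPLE_ONLY guard is hypothetical.
 */
#ifdef EXAMPLE_ONLY
static vm_map_t
vm_map_submap_example(vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	vm_map_t result;

	/* Reserve an unallocated range in the kernel map. */
	if (vm_map_find(kernel_map, NULL, 0, min, size, 0, VMFS_ANY_SPACE,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE) != KERN_SUCCESS)
		return (NULL);
	*max = *min + size;
	/* Create the subordinate map and register it over that range. */
	result = vm_map_create(vm_map_pmap(kernel_map), *min, *max);
	if (vm_map_submap(kernel_map, *min, *max, result) != KERN_SUCCESS)
		panic("vm_map_submap_example: unable to register submap");
	return (result);
}
#endif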

/*
 * The maximum number of pages to map
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload read-only mappings for the specified object's resident pages
 *	into the target map.  If "flags" is MAP_PREFAULT_PARTIAL, then only
 *	the resident pages within the address range [addr, addr + ulmin(size,
 *	ptoa(MAX_INIT_PT))) are mapped.  Otherwise, all resident pages within
 *	the specified address range are mapped.  This eliminates many soft
 *	faults on process startup and immediately after an mmap(2).  Because
 *	these are speculative mappings, cached pages are not reactivated and
 *	mapped.
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t start;
	vm_page_t p, p_start;
	vm_pindex_t psize, tmpidx;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
		return;
	VM_OBJECT_RLOCK(object);
	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
		VM_OBJECT_RUNLOCK(object);
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
			pmap_object_init_pt(map->pmap, addr, object, pindex,
			    size);
			VM_OBJECT_WUNLOCK(object);
			return;
		}
		VM_OBJECT_LOCK_DOWNGRADE(object);
	}

	psize = atop(size);
	if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
		psize = MAX_INIT_PT;
	if (psize + pindex > object->size) {
		if (object->size < pindex) {
			VM_OBJECT_RUNLOCK(object);
			return;
		}
		psize = object->size - pindex;
	}

	start = 0;
	p_start = NULL;

	p = vm_page_find_least(object, pindex);
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * Don't allow an madvise to blow away our really
		 * free pages by allocating pv entries.
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			psize = tmpidx;
			break;
		}
		if (p->valid == VM_PAGE_BITS_ALL) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
	VM_OBJECT_RUNLOCK(object);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current, entry;
	vm_object_t obj;
	struct ucred *cred;
	vm_prot_t old_prot;

	if (start == end)
		return (KERN_SUCCESS);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Do an accounting pass for private read-only mappings that
	 * now will do cow due to allowed write (e.g. debugger sets
	 * breakpoint on text segment)
	 */
	for (current = entry; (current != &map->header) &&
	     (current->start < end); current = current->next) {

		vm_map_clip_end(map, current, end);

		if (set_max ||
		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
		    ENTRY_CHARGED(current)) {
			continue;
		}

		cred = curthread->td_ucred;
		obj = current->object.vm_object;

		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
			if (!swap_reserve(current->end - current->start)) {
				vm_map_unlock(map);
				return (KERN_RESOURCE_SHORTAGE);
			}
			crhold(cred);
			current->cred = cred;
			continue;
		}

		VM_OBJECT_WLOCK(obj);
		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
			VM_OBJECT_WUNLOCK(obj);
			continue;
		}

		/*
		 * Charge for the whole object allocation now, since
		 * we cannot distinguish between non-charged and
		 * charged clipped mapping of the same object later.
		 */
		KASSERT(obj->charge == 0,
		    ("vm_map_protect: object %p overcharged (entry %p)",
		    obj, current));
		if (!swap_reserve(ptoa(obj->size))) {
			VM_OBJECT_WUNLOCK(obj);
			vm_map_unlock(map);
			return (KERN_RESOURCE_SHORTAGE);
		}

		crhold(cred);
		obj->cred = cred;
		obj->charge = ptoa(obj->size);
		VM_OBJECT_WUNLOCK(obj);
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		old_prot = current->protection;

		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * For user wired map entries, the normal lazy evaluation of
		 * write access upgrades through soft page faults is
		 * undesirable.  Instead, immediately copy any pages that are
		 * copy-on-write and enable write access in the physical map.
		 */
		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
		    (current->protection & VM_PROT_WRITE) != 0 &&
		    (old_prot & VM_PROT_WRITE) == 0) {
			KASSERT(old_prot != VM_PROT_NONE,
			    ("vm_map_protect: inaccessible wired map entry"));
			vm_fault_copy_entry(map, map, current, current, NULL);
		}

		/*
1996		 * When restricting access, update the physical map.  Worry
1997		 * about copy-on-write here.
1998		 */
1999		if ((old_prot & ~current->protection) != 0) {
2000#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2001							VM_PROT_ALL)
2002			pmap_protect(map->pmap, current->start,
2003			    current->end,
2004			    current->protection & MASK(current));
2005#undef	MASK
2006		}
2007		vm_map_simplify_entry(map, current);
2008		current = current->next;
2009	}
2010	vm_map_unlock(map);
2011	return (KERN_SUCCESS);
2012}
2013
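/*
 * Illustrative sketch, not part of this revision: downgrading a range
 * to read-only access, as an mprotect(2)-style caller might.  With
 * set_max FALSE only the current protection changes, so a later
 * upgrade back to read/write remains possible if max_protection
 * still permits it.
 */
#if 0
static int
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_protect(map, start, end, VM_PROT_READ, FALSE));
}
#endif
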
2014/*
2015 *	vm_map_madvise:
2016 *
2017 *	This routine traverses a process's map handling the madvise
2018 *	system call.  Advisories are classified as either those affecting
2019 *	the vm_map_entry structure, or those affecting the underlying
2020 *	objects.
2021 */
2022int
2023vm_map_madvise(
2024	vm_map_t map,
2025	vm_offset_t start,
2026	vm_offset_t end,
2027	int behav)
2028{
2029	vm_map_entry_t current, entry;
2030	int modify_map = 0;
2031
2032	/*
2033	 * Some madvise calls directly modify the vm_map_entry, in which case
2034	 * we need to use an exclusive lock on the map and we need to perform
2035	 * various clipping operations.  Otherwise we only need a read-lock
2036	 * on the map.
2037	 */
2038	switch(behav) {
2039	case MADV_NORMAL:
2040	case MADV_SEQUENTIAL:
2041	case MADV_RANDOM:
2042	case MADV_NOSYNC:
2043	case MADV_AUTOSYNC:
2044	case MADV_NOCORE:
2045	case MADV_CORE:
2046		if (start == end)
2047			return (KERN_SUCCESS);
2048		modify_map = 1;
2049		vm_map_lock(map);
2050		break;
2051	case MADV_WILLNEED:
2052	case MADV_DONTNEED:
2053	case MADV_FREE:
2054		if (start == end)
2055			return (KERN_SUCCESS);
2056		vm_map_lock_read(map);
2057		break;
2058	default:
2059		return (KERN_INVALID_ARGUMENT);
2060	}
2061
2062	/*
2063	 * Locate starting entry and clip if necessary.
2064	 */
2065	VM_MAP_RANGE_CHECK(map, start, end);
2066
2067	if (vm_map_lookup_entry(map, start, &entry)) {
2068		if (modify_map)
2069			vm_map_clip_start(map, entry, start);
2070	} else {
2071		entry = entry->next;
2072	}
2073
2074	if (modify_map) {
2075		/*
2076		 * madvise behaviors that are implemented in the vm_map_entry.
2077		 *
2078		 * We clip the vm_map_entry so that behavioral changes are
2079		 * limited to the specified address range.
2080		 */
2081		for (current = entry;
2082		     (current != &map->header) && (current->start < end);
2083		     current = current->next
2084		) {
2085			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2086				continue;
2087
2088			vm_map_clip_end(map, current, end);
2089
2090			switch (behav) {
2091			case MADV_NORMAL:
2092				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2093				break;
2094			case MADV_SEQUENTIAL:
2095				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2096				break;
2097			case MADV_RANDOM:
2098				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2099				break;
2100			case MADV_NOSYNC:
2101				current->eflags |= MAP_ENTRY_NOSYNC;
2102				break;
2103			case MADV_AUTOSYNC:
2104				current->eflags &= ~MAP_ENTRY_NOSYNC;
2105				break;
2106			case MADV_NOCORE:
2107				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2108				break;
2109			case MADV_CORE:
2110				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2111				break;
2112			default:
2113				break;
2114			}
2115			vm_map_simplify_entry(map, current);
2116		}
2117		vm_map_unlock(map);
2118	} else {
2119		vm_pindex_t pstart, pend;
2120
2121		/*
2122		 * madvise behaviors that are implemented in the underlying
2123		 * vm_object.
2124		 *
2125		 * Since we don't clip the vm_map_entry, we have to clip
2126		 * the vm_object pindex and count.
2127		 */
2128		for (current = entry;
2129		     (current != &map->header) && (current->start < end);
2130		     current = current->next
2131		) {
2132			vm_offset_t useEnd, useStart;
2133
2134			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2135				continue;
2136
2137			pstart = OFF_TO_IDX(current->offset);
2138			pend = pstart + atop(current->end - current->start);
2139			useStart = current->start;
2140			useEnd = current->end;
2141
2142			if (current->start < start) {
2143				pstart += atop(start - current->start);
2144				useStart = start;
2145			}
2146			if (current->end > end) {
2147				pend -= atop(current->end - end);
2148				useEnd = end;
2149			}
2150
2151			if (pstart >= pend)
2152				continue;
2153
2154			/*
2155			 * Perform the pmap_advise() before clearing
2156			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2157			 * concurrent pmap operation, such as pmap_remove(),
2158			 * could clear a reference in the pmap and set
2159			 * PGA_REFERENCED on the page before the pmap_advise()
2160			 * had completed.  Consequently, the page would appear
2161			 * referenced based upon an old reference that
2162			 * occurred before this pmap_advise() ran.
2163			 */
2164			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2165				pmap_advise(map->pmap, useStart, useEnd,
2166				    behav);
2167
2168			vm_object_madvise(current->object.vm_object, pstart,
2169			    pend, behav);
2170			if (behav == MADV_WILLNEED) {
2171				vm_map_pmap_enter(map,
2172				    useStart,
2173				    current->protection,
2174				    current->object.vm_object,
2175				    pstart,
2176				    ptoa(pend - pstart),
2177				    MAP_PREFAULT_MADVISE
2178				);
2179			}
2180		}
2181		vm_map_unlock_read(map);
2182	}
2183	return (0);
2184}
2185
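/*
 * Illustrative sketch, not part of this revision: both lock classes
 * described above in use.  MADV_WILLNEED operates on the backing
 * objects under the read lock; MADV_NOSYNC modifies the entries
 * themselves, so the exclusive lock is taken internally.
 */
#if 0
static void
example_advise(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	(void)vm_map_madvise(map, start, end, MADV_WILLNEED);
	(void)vm_map_madvise(map, start, end, MADV_NOSYNC);
}
#endif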
2186
2187/*
2188 *	vm_map_inherit:
2189 *
2190 *	Sets the inheritance of the specified address
2191 *	range in the target map.  Inheritance
2192 *	affects how the map will be shared with
2193 *	child maps at the time of vmspace_fork.
2194 */
2195int
2196vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2197	       vm_inherit_t new_inheritance)
2198{
2199	vm_map_entry_t entry;
2200	vm_map_entry_t temp_entry;
2201
2202	switch (new_inheritance) {
2203	case VM_INHERIT_NONE:
2204	case VM_INHERIT_COPY:
2205	case VM_INHERIT_SHARE:
2206		break;
2207	default:
2208		return (KERN_INVALID_ARGUMENT);
2209	}
2210	if (start == end)
2211		return (KERN_SUCCESS);
2212	vm_map_lock(map);
2213	VM_MAP_RANGE_CHECK(map, start, end);
2214	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2215		entry = temp_entry;
2216		vm_map_clip_start(map, entry, start);
2217	} else
2218		entry = temp_entry->next;
2219	while ((entry != &map->header) && (entry->start < end)) {
2220		vm_map_clip_end(map, entry, end);
2221		entry->inheritance = new_inheritance;
2222		vm_map_simplify_entry(map, entry);
2223		entry = entry->next;
2224	}
2225	vm_map_unlock(map);
2226	return (KERN_SUCCESS);
2227}
2228
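/*
 * Illustrative sketch, not part of this revision: marking a range so
 * that a child created by vmspace_fork() shares it with the parent
 * instead of receiving a copy-on-write copy.
 */
#if 0
static int
example_share_with_children(vm_map_t map, vm_offset_t start,
    vm_offset_t end)
{

	return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}
#endif
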
2229/*
2230 *	vm_map_unwire:
2231 *
2232 *	Implements both kernel and user unwiring.
2233 */
2234int
2235vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2236    int flags)
2237{
2238	vm_map_entry_t entry, first_entry, tmp_entry;
2239	vm_offset_t saved_start;
2240	unsigned int last_timestamp;
2241	int rv;
2242	boolean_t need_wakeup, result, user_unwire;
2243
2244	if (start == end)
2245		return (KERN_SUCCESS);
2246	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2247	vm_map_lock(map);
2248	VM_MAP_RANGE_CHECK(map, start, end);
2249	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2250		if (flags & VM_MAP_WIRE_HOLESOK)
2251			first_entry = first_entry->next;
2252		else {
2253			vm_map_unlock(map);
2254			return (KERN_INVALID_ADDRESS);
2255		}
2256	}
2257	last_timestamp = map->timestamp;
2258	entry = first_entry;
2259	while (entry != &map->header && entry->start < end) {
2260		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2261			/*
2262			 * We have not yet clipped the entry.
2263			 */
2264			saved_start = (start >= entry->start) ? start :
2265			    entry->start;
2266			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2267			if (vm_map_unlock_and_wait(map, 0)) {
2268				/*
2269				 * Allow interruption of user unwiring?
2270				 */
2271			}
2272			vm_map_lock(map);
2273			if (last_timestamp+1 != map->timestamp) {
2274				/*
2275				 * Look again for the entry because the map was
2276				 * modified while it was unlocked.
2277				 * Specifically, the entry may have been
2278				 * clipped, merged, or deleted.
2279				 */
2280				if (!vm_map_lookup_entry(map, saved_start,
2281				    &tmp_entry)) {
2282					if (flags & VM_MAP_WIRE_HOLESOK)
2283						tmp_entry = tmp_entry->next;
2284					else {
2285						if (saved_start == start) {
2286							/*
2287						 * first_entry has been deleted.
2288							 */
2289							vm_map_unlock(map);
2290							return (KERN_INVALID_ADDRESS);
2291						}
2292						end = saved_start;
2293						rv = KERN_INVALID_ADDRESS;
2294						goto done;
2295					}
2296				}
2297				if (entry == first_entry)
2298					first_entry = tmp_entry;
2299				else
2300					first_entry = NULL;
2301				entry = tmp_entry;
2302			}
2303			last_timestamp = map->timestamp;
2304			continue;
2305		}
2306		vm_map_clip_start(map, entry, start);
2307		vm_map_clip_end(map, entry, end);
2308		/*
2309		 * Mark the entry in case the map lock is released.  (See
2310		 * above.)
2311		 */
2312		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2313		    entry->wiring_thread == NULL,
2314		    ("owned map entry %p", entry));
2315		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2316		entry->wiring_thread = curthread;
2317		/*
2318		 * Check the map for holes in the specified region.
2319		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2320		 */
2321		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2322		    (entry->end < end && (entry->next == &map->header ||
2323		    entry->next->start > entry->end))) {
2324			end = entry->end;
2325			rv = KERN_INVALID_ADDRESS;
2326			goto done;
2327		}
2328		/*
2329		 * If system unwiring, require that the entry is system wired.
2330		 */
2331		if (!user_unwire &&
2332		    vm_map_entry_system_wired_count(entry) == 0) {
2333			end = entry->end;
2334			rv = KERN_INVALID_ARGUMENT;
2335			goto done;
2336		}
2337		entry = entry->next;
2338	}
2339	rv = KERN_SUCCESS;
2340done:
2341	need_wakeup = FALSE;
2342	if (first_entry == NULL) {
2343		result = vm_map_lookup_entry(map, start, &first_entry);
2344		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2345			first_entry = first_entry->next;
2346		else
2347			KASSERT(result, ("vm_map_unwire: lookup failed"));
2348	}
2349	for (entry = first_entry; entry != &map->header && entry->start < end;
2350	    entry = entry->next) {
2351		/*
2352		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2353		 * space in the unwired region could have been mapped
2354		 * while the map lock was dropped for draining
2355		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2356		 * could be simultaneously wiring this new mapping
2357		 * entry.  Detect these cases and skip any entries
2358		 * marked as in transition by us.
2359		 */
2360		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2361		    entry->wiring_thread != curthread) {
2362			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2363			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2364			continue;
2365		}
2366
2367		if (rv == KERN_SUCCESS && (!user_unwire ||
2368		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2369			if (user_unwire)
2370				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2371			entry->wired_count--;
2372			if (entry->wired_count == 0) {
2373				/*
2374				 * Retain the map lock.
2375				 */
2376				vm_fault_unwire(map, entry->start, entry->end,
2377				    entry->object.vm_object != NULL &&
2378				    (entry->object.vm_object->flags &
2379				    OBJ_FICTITIOUS) != 0);
2380			}
2381		}
2382		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2383		    ("vm_map_unwire: in-transition flag missing %p", entry));
2384		KASSERT(entry->wiring_thread == curthread,
2385		    ("vm_map_unwire: alien wire %p", entry));
2386		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2387		entry->wiring_thread = NULL;
2388		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2389			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2390			need_wakeup = TRUE;
2391		}
2392		vm_map_simplify_entry(map, entry);
2393	}
2394	vm_map_unlock(map);
2395	if (need_wakeup)
2396		vm_map_wakeup(map);
2397	return (rv);
2398}
2399
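/*
 * Illustrative sketch, not part of this revision: a munlock(2)-style
 * user unwiring of a range that must be fully mapped; a hole fails
 * with KERN_INVALID_ADDRESS because VM_MAP_WIRE_HOLESOK is not given.
 */
#if 0
static int
example_user_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_unwire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}
#endif
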
2400/*
2401 *	vm_map_wire:
2402 *
2403 *	Implements both kernel and user wiring.
2404 */
2405int
2406vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2407    int flags)
2408{
2409	vm_map_entry_t entry, first_entry, tmp_entry;
2410	vm_offset_t saved_end, saved_start;
2411	unsigned int last_timestamp;
2412	int rv;
2413	boolean_t fictitious, need_wakeup, result, user_wire;
2414	vm_prot_t prot;
2415
2416	if (start == end)
2417		return (KERN_SUCCESS);
2418	prot = 0;
2419	if (flags & VM_MAP_WIRE_WRITE)
2420		prot |= VM_PROT_WRITE;
2421	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2422	vm_map_lock(map);
2423	VM_MAP_RANGE_CHECK(map, start, end);
2424	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2425		if (flags & VM_MAP_WIRE_HOLESOK)
2426			first_entry = first_entry->next;
2427		else {
2428			vm_map_unlock(map);
2429			return (KERN_INVALID_ADDRESS);
2430		}
2431	}
2432	last_timestamp = map->timestamp;
2433	entry = first_entry;
2434	while (entry != &map->header && entry->start < end) {
2435		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2436			/*
2437			 * We have not yet clipped the entry.
2438			 */
2439			saved_start = (start >= entry->start) ? start :
2440			    entry->start;
2441			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2442			if (vm_map_unlock_and_wait(map, 0)) {
2443				/*
2444				 * Allow interruption of user wiring?
2445				 */
2446			}
2447			vm_map_lock(map);
2448			if (last_timestamp + 1 != map->timestamp) {
2449				/*
2450				 * Look again for the entry because the map was
2451				 * modified while it was unlocked.
2452				 * Specifically, the entry may have been
2453				 * clipped, merged, or deleted.
2454				 */
2455				if (!vm_map_lookup_entry(map, saved_start,
2456				    &tmp_entry)) {
2457					if (flags & VM_MAP_WIRE_HOLESOK)
2458						tmp_entry = tmp_entry->next;
2459					else {
2460						if (saved_start == start) {
2461							/*
2462							 * first_entry has been deleted.
2463							 */
2464							vm_map_unlock(map);
2465							return (KERN_INVALID_ADDRESS);
2466						}
2467						end = saved_start;
2468						rv = KERN_INVALID_ADDRESS;
2469						goto done;
2470					}
2471				}
2472				if (entry == first_entry)
2473					first_entry = tmp_entry;
2474				else
2475					first_entry = NULL;
2476				entry = tmp_entry;
2477			}
2478			last_timestamp = map->timestamp;
2479			continue;
2480		}
2481		vm_map_clip_start(map, entry, start);
2482		vm_map_clip_end(map, entry, end);
2483		/*
2484		 * Mark the entry in case the map lock is released.  (See
2485		 * above.)
2486		 */
2487		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2488		    entry->wiring_thread == NULL,
2489		    ("owned map entry %p", entry));
2490		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2491		entry->wiring_thread = curthread;
2492		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2493		    || (entry->protection & prot) != prot) {
2494			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2495			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2496				end = entry->end;
2497				rv = KERN_INVALID_ADDRESS;
2498				goto done;
2499			}
2500			goto next_entry;
2501		}
2502		if (entry->wired_count == 0) {
2503			entry->wired_count++;
2504			saved_start = entry->start;
2505			saved_end = entry->end;
2506			fictitious = entry->object.vm_object != NULL &&
2507			    (entry->object.vm_object->flags &
2508			    OBJ_FICTITIOUS) != 0;
2509			/*
2510			 * Release the map lock, relying on the in-transition
2511			 * mark.  Mark the map busy for fork.
2512			 */
2513			vm_map_busy(map);
2514			vm_map_unlock(map);
2515			rv = vm_fault_wire(map, saved_start, saved_end,
2516			    fictitious);
2517			vm_map_lock(map);
2518			vm_map_unbusy(map);
2519			if (last_timestamp + 1 != map->timestamp) {
2520				/*
2521				 * Look again for the entry because the map was
2522				 * modified while it was unlocked.  The entry
2523				 * may have been clipped, but NOT merged or
2524				 * deleted.
2525				 */
2526				result = vm_map_lookup_entry(map, saved_start,
2527				    &tmp_entry);
2528				KASSERT(result, ("vm_map_wire: lookup failed"));
2529				if (entry == first_entry)
2530					first_entry = tmp_entry;
2531				else
2532					first_entry = NULL;
2533				entry = tmp_entry;
2534				while (entry->end < saved_end) {
2535					if (rv != KERN_SUCCESS) {
2536						KASSERT(entry->wired_count == 1,
2537						    ("vm_map_wire: bad count"));
2538						entry->wired_count = -1;
2539					}
2540					entry = entry->next;
2541				}
2542			}
2543			last_timestamp = map->timestamp;
2544			if (rv != KERN_SUCCESS) {
2545				KASSERT(entry->wired_count == 1,
2546				    ("vm_map_wire: bad count"));
2547				/*
2548				 * Assign an out-of-range value to represent
2549				 * the failure to wire this entry.
2550				 */
2551				entry->wired_count = -1;
2552				end = entry->end;
2553				goto done;
2554			}
2555		} else if (!user_wire ||
2556			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2557			entry->wired_count++;
2558		}
2559		/*
2560		 * Check the map for holes in the specified region.
2561		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2562		 */
2563	next_entry:
2564		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2565		    (entry->end < end && (entry->next == &map->header ||
2566		    entry->next->start > entry->end))) {
2567			end = entry->end;
2568			rv = KERN_INVALID_ADDRESS;
2569			goto done;
2570		}
2571		entry = entry->next;
2572	}
2573	rv = KERN_SUCCESS;
2574done:
2575	need_wakeup = FALSE;
2576	if (first_entry == NULL) {
2577		result = vm_map_lookup_entry(map, start, &first_entry);
2578		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2579			first_entry = first_entry->next;
2580		else
2581			KASSERT(result, ("vm_map_wire: lookup failed"));
2582	}
2583	for (entry = first_entry; entry != &map->header && entry->start < end;
2584	    entry = entry->next) {
2585		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2586			goto next_entry_done;
2587
2588		/*
2589		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2590		 * space in the unwired region could have been mapped
2591		 * while the map lock was dropped for faulting in the
2592		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2593		 * Moreover, another thread could be simultaneously
2594		 * wiring this new mapping entry.  Detect these cases
2595		 * and skip any entries marked as in transition by us.
2596		 */
2597		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2598		    entry->wiring_thread != curthread) {
2599			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2600			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2601			continue;
2602		}
2603
2604		if (rv == KERN_SUCCESS) {
2605			if (user_wire)
2606				entry->eflags |= MAP_ENTRY_USER_WIRED;
2607		} else if (entry->wired_count == -1) {
2608			/*
2609			 * Wiring failed on this entry.  Thus, unwiring is
2610			 * unnecessary.
2611			 */
2612			entry->wired_count = 0;
2613		} else {
2614			if (!user_wire ||
2615			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2616				entry->wired_count--;
2617			if (entry->wired_count == 0) {
2618				/*
2619				 * Retain the map lock.
2620				 */
2621				vm_fault_unwire(map, entry->start, entry->end,
2622				    entry->object.vm_object != NULL &&
2623				    (entry->object.vm_object->flags &
2624				    OBJ_FICTITIOUS) != 0);
2625			}
2626		}
2627	next_entry_done:
2628		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2629		    ("vm_map_wire: in-transition flag missing %p", entry));
2630		KASSERT(entry->wiring_thread == curthread,
2631		    ("vm_map_wire: alien wire %p", entry));
2632		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2633		    MAP_ENTRY_WIRE_SKIPPED);
2634		entry->wiring_thread = NULL;
2635		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2636			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2637			need_wakeup = TRUE;
2638		}
2639		vm_map_simplify_entry(map, entry);
2640	}
2641	vm_map_unlock(map);
2642	if (need_wakeup)
2643		vm_map_wakeup(map);
2644	return (rv);
2645}
2646
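/*
 * Illustrative sketch, not part of this revision: a user wiring that
 * also demands write access, so read-only entries fail the request
 * instead of being silently wired.  Compare the MAP_WIREFUTURE call
 * in vm_map_growstack() below.
 */
#if 0
static int
example_user_wire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES | VM_MAP_WIRE_WRITE));
}
#endif
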
2647/*
2648 * vm_map_sync
2649 *
2650 * Push any dirty cached pages in the address range to their pager.
2651 * If syncio is TRUE, dirty pages are written synchronously.
2652 * If invalidate is TRUE, any cached pages are freed as well.
2653 *
2654 * If the size of the region from start to end is zero, we are
2655 * supposed to flush all modified pages within the region containing
2656 * start.  Unfortunately, a region can be split or coalesced with
2657 * neighboring regions, making it difficult to determine what the
2658 * original region was.  Therefore, we approximate this requirement by
2659 * flushing the current region containing start.
2660 *
2661 * Returns an error if any part of the specified range is not mapped.
2662 */
2663int
2664vm_map_sync(
2665	vm_map_t map,
2666	vm_offset_t start,
2667	vm_offset_t end,
2668	boolean_t syncio,
2669	boolean_t invalidate)
2670{
2671	vm_map_entry_t current;
2672	vm_map_entry_t entry;
2673	vm_size_t size;
2674	vm_object_t object;
2675	vm_ooffset_t offset;
2676	unsigned int last_timestamp;
2677	boolean_t failed;
2678
2679	vm_map_lock_read(map);
2680	VM_MAP_RANGE_CHECK(map, start, end);
2681	if (!vm_map_lookup_entry(map, start, &entry)) {
2682		vm_map_unlock_read(map);
2683		return (KERN_INVALID_ADDRESS);
2684	} else if (start == end) {
2685		start = entry->start;
2686		end = entry->end;
2687	}
2688	/*
2689	 * Make a first pass to check for user-wired memory and holes.
2690	 */
2691	for (current = entry; current != &map->header && current->start < end;
2692	    current = current->next) {
2693		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2694			vm_map_unlock_read(map);
2695			return (KERN_INVALID_ARGUMENT);
2696		}
2697		if (end > current->end &&
2698		    (current->next == &map->header ||
2699			current->end != current->next->start)) {
2700			vm_map_unlock_read(map);
2701			return (KERN_INVALID_ADDRESS);
2702		}
2703	}
2704
2705	if (invalidate)
2706		pmap_remove(map->pmap, start, end);
2707	failed = FALSE;
2708
2709	/*
2710	 * Make a second pass, cleaning/uncaching pages from the indicated
2711	 * objects as we go.
2712	 */
2713	for (current = entry; current != &map->header && current->start < end;) {
2714		offset = current->offset + (start - current->start);
2715		size = (end <= current->end ? end : current->end) - start;
2716		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2717			vm_map_t smap;
2718			vm_map_entry_t tentry;
2719			vm_size_t tsize;
2720
2721			smap = current->object.sub_map;
2722			vm_map_lock_read(smap);
2723			(void) vm_map_lookup_entry(smap, offset, &tentry);
2724			tsize = tentry->end - offset;
2725			if (tsize < size)
2726				size = tsize;
2727			object = tentry->object.vm_object;
2728			offset = tentry->offset + (offset - tentry->start);
2729			vm_map_unlock_read(smap);
2730		} else {
2731			object = current->object.vm_object;
2732		}
2733		vm_object_reference(object);
2734		last_timestamp = map->timestamp;
2735		vm_map_unlock_read(map);
2736		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2737			failed = TRUE;
2738		start += size;
2739		vm_object_deallocate(object);
2740		vm_map_lock_read(map);
2741		if (last_timestamp == map->timestamp ||
2742		    !vm_map_lookup_entry(map, start, &current))
2743			current = current->next;
2744	}
2745
2746	vm_map_unlock_read(map);
2747	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2748}
2749
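/*
 * Illustrative sketch, not part of this revision: an msync(2)-style
 * synchronous flush of dirty pages in a range, without invalidating
 * the cached pages afterwards.
 */
#if 0
static int
example_flush_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	return (vm_map_sync(map, start, end, TRUE, FALSE));
}
#endif
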
2750/*
2751 *	vm_map_entry_unwire:	[ internal use only ]
2752 *
2753 *	Make the region specified by this entry pageable.
2754 *
2755 *	The map in question should be locked.
2756 *	[This is the reason for this routine's existence.]
2757 */
2758static void
2759vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2760{
2761	vm_fault_unwire(map, entry->start, entry->end,
2762	    entry->object.vm_object != NULL &&
2763	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
2764	entry->wired_count = 0;
2765}
2766
2767static void
2768vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2769{
2770
2771	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2772		vm_object_deallocate(entry->object.vm_object);
2773	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2774}
2775
2776/*
2777 *	vm_map_entry_delete:	[ internal use only ]
2778 *
2779 *	Deallocate the given entry from the target map.
2780 */
2781static void
2782vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2783{
2784	vm_object_t object;
2785	vm_pindex_t offidxstart, offidxend, count, size1;
2786	vm_ooffset_t size;
2787
2788	vm_map_entry_unlink(map, entry);
2789	object = entry->object.vm_object;
2790	size = entry->end - entry->start;
2791	map->size -= size;
2792
2793	if (entry->cred != NULL) {
2794		swap_release_by_cred(size, entry->cred);
2795		crfree(entry->cred);
2796	}
2797
2798	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2799	    (object != NULL)) {
2800		KASSERT(entry->cred == NULL || object->cred == NULL ||
2801		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2802		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2803		count = OFF_TO_IDX(size);
2804		offidxstart = OFF_TO_IDX(entry->offset);
2805		offidxend = offidxstart + count;
2806		VM_OBJECT_WLOCK(object);
2807		if (object->ref_count != 1 &&
2808		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2809		    object == kernel_object || object == kmem_object)) {
2810			vm_object_collapse(object);
2811
2812			/*
2813			 * The option OBJPR_NOTMAPPED can be passed here
2814			 * because vm_map_delete() already performed
2815			 * pmap_remove() on the only mapping to this range
2816			 * of pages.
2817			 */
2818			vm_object_page_remove(object, offidxstart, offidxend,
2819			    OBJPR_NOTMAPPED);
2820			if (object->type == OBJT_SWAP)
2821				swap_pager_freespace(object, offidxstart, count);
2822			if (offidxend >= object->size &&
2823			    offidxstart < object->size) {
2824				size1 = object->size;
2825				object->size = offidxstart;
2826				if (object->cred != NULL) {
2827					size1 -= object->size;
2828					KASSERT(object->charge >= ptoa(size1),
2829					    ("vm_map_entry_delete: object->charge < 0"));
2830					swap_release_by_cred(ptoa(size1), object->cred);
2831					object->charge -= ptoa(size1);
2832				}
2833			}
2834		}
2835		VM_OBJECT_WUNLOCK(object);
2836	} else
2837		entry->object.vm_object = NULL;
2838	if (map->system_map)
2839		vm_map_entry_deallocate(entry, TRUE);
2840	else {
2841		entry->next = curthread->td_map_def_user;
2842		curthread->td_map_def_user = entry;
2843	}
2844}
2845
2846/*
2847 *	vm_map_delete:	[ internal use only ]
2848 *
2849 *	Deallocates the given address range from the target
2850 *	map.
2851 */
2852int
2853vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2854{
2855	vm_map_entry_t entry;
2856	vm_map_entry_t first_entry;
2857
2858	VM_MAP_ASSERT_LOCKED(map);
2859	if (start == end)
2860		return (KERN_SUCCESS);
2861
2862	/*
2863	 * Find the start of the region, and clip it
2864	 */
2865	if (!vm_map_lookup_entry(map, start, &first_entry))
2866		entry = first_entry->next;
2867	else {
2868		entry = first_entry;
2869		vm_map_clip_start(map, entry, start);
2870	}
2871
2872	/*
2873	 * Step through all entries in this region
2874	 */
2875	while ((entry != &map->header) && (entry->start < end)) {
2876		vm_map_entry_t next;
2877
2878		/*
2879		 * Wait for wiring or unwiring of an entry to complete.
2880		 * Also wait for any system wirings to disappear on
2881		 * user maps.
2882		 */
2883		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2884		    (vm_map_pmap(map) != kernel_pmap &&
2885		    vm_map_entry_system_wired_count(entry) != 0)) {
2886			unsigned int last_timestamp;
2887			vm_offset_t saved_start;
2888			vm_map_entry_t tmp_entry;
2889
2890			saved_start = entry->start;
2891			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2892			last_timestamp = map->timestamp;
2893			(void) vm_map_unlock_and_wait(map, 0);
2894			vm_map_lock(map);
2895			if (last_timestamp + 1 != map->timestamp) {
2896				/*
2897				 * Look again for the entry because the map was
2898				 * modified while it was unlocked.
2899				 * Specifically, the entry may have been
2900				 * clipped, merged, or deleted.
2901				 */
2902				if (!vm_map_lookup_entry(map, saved_start,
2903							 &tmp_entry))
2904					entry = tmp_entry->next;
2905				else {
2906					entry = tmp_entry;
2907					vm_map_clip_start(map, entry,
2908							  saved_start);
2909				}
2910			}
2911			continue;
2912		}
2913		vm_map_clip_end(map, entry, end);
2914
2915		next = entry->next;
2916
2917		/*
2918		 * Unwire before removing addresses from the pmap; otherwise,
2919		 * unwiring will put the entries back in the pmap.
2920		 */
2921		if (entry->wired_count != 0) {
2922			vm_map_entry_unwire(map, entry);
2923		}
2924
2925		pmap_remove(map->pmap, entry->start, entry->end);
2926
2927		/*
2928		 * Delete the entry only after removing all pmap
2929		 * entries pointing to its pages.  (Otherwise, its
2930		 * page frames may be reallocated, and any modify bits
2931		 * will be set in the wrong object!)
2932		 */
2933		vm_map_entry_delete(map, entry);
2934		entry = next;
2935	}
2936	return (KERN_SUCCESS);
2937}
2938
2939/*
2940 *	vm_map_remove:
2941 *
2942 *	Remove the given address range from the target map.
2943 *	This is the exported form of vm_map_delete.
2944 */
2945int
2946vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2947{
2948	int result;
2949
2950	vm_map_lock(map);
2951	VM_MAP_RANGE_CHECK(map, start, end);
2952	result = vm_map_delete(map, start, end);
2953	vm_map_unlock(map);
2954	return (result);
2955}
2956
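/*
 * Illustrative sketch, not part of this revision: tearing down a
 * mapping through the exported interface, which performs the locking
 * and range checking that vm_map_delete() expects of its callers.
 */
#if 0
static void
example_unmap(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	(void)vm_map_remove(map, start, end);
}
#endif
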
2957/*
2958 *	vm_map_check_protection:
2959 *
2960 *	Assert that the target map allows the specified privilege on the
2961 *	entire address region given.  The entire region must be allocated.
2962 *
2963 *	WARNING!  This code does not and should not check whether the
2964 *	contents of the region are accessible.  For example, a smaller file
2965 *	might be mapped into a larger address space.
2966 *
2967 *	NOTE!  This code is also called by munmap().
2968 *
2969 *	The map must be locked.  A read lock is sufficient.
2970 */
2971boolean_t
2972vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2973			vm_prot_t protection)
2974{
2975	vm_map_entry_t entry;
2976	vm_map_entry_t tmp_entry;
2977
2978	if (!vm_map_lookup_entry(map, start, &tmp_entry))
2979		return (FALSE);
2980	entry = tmp_entry;
2981
2982	while (start < end) {
2983		if (entry == &map->header)
2984			return (FALSE);
2985		/*
2986		 * No holes allowed!
2987		 */
2988		if (start < entry->start)
2989			return (FALSE);
2990		/*
2991		 * Check protection associated with entry.
2992		 */
2993		if ((entry->protection & protection) != protection)
2994			return (FALSE);
2995		/* go to next entry */
2996		start = entry->end;
2997		entry = entry->next;
2998	}
2999	return (TRUE);
3000}
3001
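/*
 * Illustrative sketch, not part of this revision: verifying that an
 * entire range is mapped readable before acting on it.  The read
 * lock taken here satisfies the locking requirement stated above.
 */
#if 0
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
	vm_map_unlock_read(map);
	return (ok);
}
#endif
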
3002/*
3003 *	vm_map_copy_entry:
3004 *
3005 *	Copies the contents of the source entry to the destination
3006 *	entry.  The entries *must* be aligned properly.
3007 */
3008static void
3009vm_map_copy_entry(
3010	vm_map_t src_map,
3011	vm_map_t dst_map,
3012	vm_map_entry_t src_entry,
3013	vm_map_entry_t dst_entry,
3014	vm_ooffset_t *fork_charge)
3015{
3016	vm_object_t src_object;
3017	vm_map_entry_t fake_entry;
3018	vm_offset_t size;
3019	struct ucred *cred;
3020	int charged;
3021
3022	VM_MAP_ASSERT_LOCKED(dst_map);
3023
3024	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3025		return;
3026
3027	if (src_entry->wired_count == 0 ||
3028	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3029		/*
3030		 * If the source entry is marked needs_copy, it is already
3031		 * write-protected.
3032		 */
3033		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3034		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3035			pmap_protect(src_map->pmap,
3036			    src_entry->start,
3037			    src_entry->end,
3038			    src_entry->protection & ~VM_PROT_WRITE);
3039		}
3040
3041		/*
3042		 * Make a copy of the object.
3043		 */
3044		size = src_entry->end - src_entry->start;
3045		if ((src_object = src_entry->object.vm_object) != NULL) {
3046			VM_OBJECT_WLOCK(src_object);
3047			charged = ENTRY_CHARGED(src_entry);
3048			if ((src_object->handle == NULL) &&
3049				(src_object->type == OBJT_DEFAULT ||
3050				 src_object->type == OBJT_SWAP)) {
3051				vm_object_collapse(src_object);
3052				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3053					vm_object_split(src_entry);
3054					src_object = src_entry->object.vm_object;
3055				}
3056			}
3057			vm_object_reference_locked(src_object);
3058			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3059			if (src_entry->cred != NULL &&
3060			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3061				KASSERT(src_object->cred == NULL,
3062				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3063				     src_object));
3064				src_object->cred = src_entry->cred;
3065				src_object->charge = size;
3066			}
3067			VM_OBJECT_WUNLOCK(src_object);
3068			dst_entry->object.vm_object = src_object;
3069			if (charged) {
3070				cred = curthread->td_ucred;
3071				crhold(cred);
3072				dst_entry->cred = cred;
3073				*fork_charge += size;
3074				if (!(src_entry->eflags &
3075				      MAP_ENTRY_NEEDS_COPY)) {
3076					crhold(cred);
3077					src_entry->cred = cred;
3078					*fork_charge += size;
3079				}
3080			}
3081			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3082			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3083			dst_entry->offset = src_entry->offset;
3084			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3085				/*
3086				 * MAP_ENTRY_VN_WRITECNT cannot
3087				 * indicate write reference from
3088				 * src_entry, since the entry is
3089				 * marked as needs copy.  Allocate a
3090				 * fake entry that is used to
3091				 * decrement object->un_pager.vnp.writecount
3092				 * at the appropriate time.  Attach
3093				 * fake_entry to the deferred list.
3094				 */
3095				fake_entry = vm_map_entry_create(dst_map);
3096				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3097				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3098				vm_object_reference(src_object);
3099				fake_entry->object.vm_object = src_object;
3100				fake_entry->start = src_entry->start;
3101				fake_entry->end = src_entry->end;
3102				fake_entry->next = curthread->td_map_def_user;
3103				curthread->td_map_def_user = fake_entry;
3104			}
3105		} else {
3106			dst_entry->object.vm_object = NULL;
3107			dst_entry->offset = 0;
3108			if (src_entry->cred != NULL) {
3109				dst_entry->cred = curthread->td_ucred;
3110				crhold(dst_entry->cred);
3111				*fork_charge += size;
3112			}
3113		}
3114
3115		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3116		    dst_entry->end - dst_entry->start, src_entry->start);
3117	} else {
3118		/*
3119		 * We don't want to make writeable wired pages copy-on-write.
3120		 * Immediately copy these pages into the new map by simulating
3121		 * page faults.  The new pages are pageable.
3122		 */
3123		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3124		    fork_charge);
3125	}
3126}
3127
3128/*
3129 * vmspace_map_entry_forked:
3130 * Update the newly-forked vmspace each time a map entry is inherited
3131 * or copied.  The values for vm_dsize and vm_tsize are approximate
3132 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3133 */
3134static void
3135vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3136    vm_map_entry_t entry)
3137{
3138	vm_size_t entrysize;
3139	vm_offset_t newend;
3140
3141	entrysize = entry->end - entry->start;
3142	vm2->vm_map.size += entrysize;
3143	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3144		vm2->vm_ssize += btoc(entrysize);
3145	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3146	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3147		newend = MIN(entry->end,
3148		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3149		vm2->vm_dsize += btoc(newend - entry->start);
3150	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3151	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3152		newend = MIN(entry->end,
3153		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3154		vm2->vm_tsize += btoc(newend - entry->start);
3155	}
3156}
3157
3158/*
3159 * vmspace_fork:
3160 * Create a new process vmspace structure and vm_map
3161 * based on those of an existing process.  The new map
3162 * is based on the old map, according to the inheritance
3163 * values on the regions in that map.
3164 *
3165 * XXX It might be worth coalescing the entries added to the new vmspace.
3166 *
3167 * The source map must not be locked.
3168 */
3169struct vmspace *
3170vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3171{
3172	struct vmspace *vm2;
3173	vm_map_t new_map, old_map;
3174	vm_map_entry_t new_entry, old_entry;
3175	vm_object_t object;
3176	int locked;
3177
3178	old_map = &vm1->vm_map;
3179	/* Copy immutable fields of vm1 to vm2. */
3180	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3181	if (vm2 == NULL)
3182		return (NULL);
3183	vm2->vm_taddr = vm1->vm_taddr;
3184	vm2->vm_daddr = vm1->vm_daddr;
3185	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3186	vm_map_lock(old_map);
3187	if (old_map->busy)
3188		vm_map_wait_busy(old_map);
3189	new_map = &vm2->vm_map;
3190	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3191	KASSERT(locked, ("vmspace_fork: lock failed"));
3192
3193	old_entry = old_map->header.next;
3194
3195	while (old_entry != &old_map->header) {
3196		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3197			panic("vm_map_fork: encountered a submap");
3198
3199		switch (old_entry->inheritance) {
3200		case VM_INHERIT_NONE:
3201			break;
3202
3203		case VM_INHERIT_SHARE:
3204			/*
3205			 * Clone the entry, creating the shared object if necessary.
3206			 */
3207			object = old_entry->object.vm_object;
3208			if (object == NULL) {
3209				object = vm_object_allocate(OBJT_DEFAULT,
3210					atop(old_entry->end - old_entry->start));
3211				old_entry->object.vm_object = object;
3212				old_entry->offset = 0;
3213				if (old_entry->cred != NULL) {
3214					object->cred = old_entry->cred;
3215					object->charge = old_entry->end -
3216					    old_entry->start;
3217					old_entry->cred = NULL;
3218				}
3219			}
3220
3221			/*
3222			 * Add the reference before calling vm_object_shadow
3223			 * to ensure that a shadow object is created.
3224			 */
3225			vm_object_reference(object);
3226			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3227				vm_object_shadow(&old_entry->object.vm_object,
3228				    &old_entry->offset,
3229				    old_entry->end - old_entry->start);
3230				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3231				/* Transfer the second reference too. */
3232				vm_object_reference(
3233				    old_entry->object.vm_object);
3234
3235				/*
3236				 * As in vm_map_simplify_entry(), the
3237				 * vnode lock will not be acquired in
3238				 * this call to vm_object_deallocate().
3239				 */
3240				vm_object_deallocate(object);
3241				object = old_entry->object.vm_object;
3242			}
3243			VM_OBJECT_WLOCK(object);
3244			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3245			if (old_entry->cred != NULL) {
3246				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3247				object->cred = old_entry->cred;
3248				object->charge = old_entry->end - old_entry->start;
3249				old_entry->cred = NULL;
3250			}
3251
3252			/*
3253			 * Assert the correct state of the vnode
3254			 * v_writecount while the object is locked, so
3255			 * that the object need not be relocked later
3256			 * just for this assertion.
3257			 */
3258			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3259			    object->type == OBJT_VNODE) {
3260				KASSERT(((struct vnode *)object->handle)->
3261				    v_writecount > 0,
3262				    ("vmspace_fork: v_writecount %p", object));
3263				KASSERT(object->un_pager.vnp.writemappings > 0,
3264				    ("vmspace_fork: vnp.writecount %p",
3265				    object));
3266			}
3267			VM_OBJECT_WUNLOCK(object);
3268
3269			/*
3270			 * Clone the entry, referencing the shared object.
3271			 */
3272			new_entry = vm_map_entry_create(new_map);
3273			*new_entry = *old_entry;
3274			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3275			    MAP_ENTRY_IN_TRANSITION);
3276			new_entry->wiring_thread = NULL;
3277			new_entry->wired_count = 0;
3278			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3279				vnode_pager_update_writecount(object,
3280				    new_entry->start, new_entry->end);
3281			}
3282
3283			/*
3284			 * Insert the entry into the new map -- we know we're
3285			 * inserting at the end of the new map.
3286			 */
3287			vm_map_entry_link(new_map, new_map->header.prev,
3288			    new_entry);
3289			vmspace_map_entry_forked(vm1, vm2, new_entry);
3290
3291			/*
3292			 * Update the physical map
3293			 */
3294			pmap_copy(new_map->pmap, old_map->pmap,
3295			    new_entry->start,
3296			    (old_entry->end - old_entry->start),
3297			    old_entry->start);
3298			break;
3299
3300		case VM_INHERIT_COPY:
3301			/*
3302			 * Clone the entry and link into the map.
3303			 */
3304			new_entry = vm_map_entry_create(new_map);
3305			*new_entry = *old_entry;
3306			/*
3307			 * Copied entry is COW over the old object.
3308			 */
3309			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3310			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3311			new_entry->wiring_thread = NULL;
3312			new_entry->wired_count = 0;
3313			new_entry->object.vm_object = NULL;
3314			new_entry->cred = NULL;
3315			vm_map_entry_link(new_map, new_map->header.prev,
3316			    new_entry);
3317			vmspace_map_entry_forked(vm1, vm2, new_entry);
3318			vm_map_copy_entry(old_map, new_map, old_entry,
3319			    new_entry, fork_charge);
3320			break;
3321		}
3322		old_entry = old_entry->next;
3323	}
3324	/*
3325	 * Use inlined vm_map_unlock() to postpone handling the deferred
3326	 * map entries, which cannot be done until both old_map and
3327	 * new_map locks are released.
3328	 */
3329	sx_xunlock(&old_map->lock);
3330	sx_xunlock(&new_map->lock);
3331	vm_map_process_deferred();
3332
3333	return (vm2);
3334}
3335
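/*
 * Illustrative sketch, not part of this revision: the shape of a fork
 * path caller.  fork_charge accumulates the swap reservation that the
 * child's credential must eventually absorb; settling that charge is
 * the caller's responsibility and is outside this sketch.
 */
#if 0
static struct vmspace *
example_fork_vmspace(struct vmspace *vm1)
{
	vm_ooffset_t fork_charge;

	fork_charge = 0;
	return (vmspace_fork(vm1, &fork_charge));
}
#endif
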
3336int
3337vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3338    vm_prot_t prot, vm_prot_t max, int cow)
3339{
3340	vm_map_entry_t new_entry, prev_entry;
3341	vm_offset_t bot, top;
3342	vm_size_t growsize, init_ssize;
3343	int orient, rv;
3344	rlim_t lmemlim, vmemlim;
3345
3346	/*
3347	 * The stack orientation is piggybacked with the cow argument.
3348	 * Extract it into orient and mask the cow argument so that we
3349	 * don't pass it around further.
3350	 * NOTE: We explicitly allow bi-directional stacks.
3351	 */
3352	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3353	KASSERT(orient != 0, ("No stack grow direction"));
3354
3355	if (addrbos < vm_map_min(map) ||
3356	    addrbos > vm_map_max(map) ||
3357	    addrbos + max_ssize < addrbos)
3358		return (KERN_NO_SPACE);
3359
3360	growsize = sgrowsiz;
3361	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3362
3363	PROC_LOCK(curproc);
3364	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3365	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3366	PROC_UNLOCK(curproc);
3367
3368	vm_map_lock(map);
3369
3370	/* If addr is already mapped, no go */
3371	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3372		vm_map_unlock(map);
3373		return (KERN_NO_SPACE);
3374	}
3375
3376	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3377		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3378			vm_map_unlock(map);
3379			return (KERN_NO_SPACE);
3380		}
3381	}
3382
3383	/* If we would blow our VMEM resource limit, no go */
3384	if (map->size + init_ssize > vmemlim) {
3385		vm_map_unlock(map);
3386		return (KERN_NO_SPACE);
3387	}
3388
3389	/*
3390	 * If we can't accommodate max_ssize in the current mapping, no go.
3391	 * However, we need to be aware that subsequent user mappings might
3392	 * map into the space we have reserved for stack, and currently this
3393	 * space is not protected.
3394	 *
3395	 * Hopefully we will at least detect this condition when we try to
3396	 * grow the stack.
3397	 */
3398	if ((prev_entry->next != &map->header) &&
3399	    (prev_entry->next->start < addrbos + max_ssize)) {
3400		vm_map_unlock(map);
3401		return (KERN_NO_SPACE);
3402	}
3403
3404	/*
3405	 * We initially map a stack of only init_ssize.  We will grow as
3406	 * needed later.  Depending on the orientation of the stack (i.e.
3407	 * the grow direction) we either map at the top of the range, the
3408	 * bottom of the range or in the middle.
3409	 *
3410	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3411	 * and cow to be 0.  Possibly we should eliminate these as input
3412	 * parameters, and just pass these values here in the insert call.
3413	 */
3414	if (orient == MAP_STACK_GROWS_DOWN)
3415		bot = addrbos + max_ssize - init_ssize;
3416	else if (orient == MAP_STACK_GROWS_UP)
3417		bot = addrbos;
3418	else
3419		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3420	top = bot + init_ssize;
3421	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3422
3423	/* Now set the avail_ssize amount. */
3424	if (rv == KERN_SUCCESS) {
3425		if (prev_entry != &map->header)
3426			vm_map_clip_end(map, prev_entry, bot);
3427		new_entry = prev_entry->next;
3428		if (new_entry->end != top || new_entry->start != bot)
3429			panic("Bad entry start/end for new stack entry");
3430
3431		new_entry->avail_ssize = max_ssize - init_ssize;
3432		if (orient & MAP_STACK_GROWS_DOWN)
3433			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3434		if (orient & MAP_STACK_GROWS_UP)
3435			new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3436	}
3437
3438	vm_map_unlock(map);
3439	return (rv);
3440}
3441
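/*
 * Illustrative sketch, not part of this revision: reserving a
 * downward-growing stack.  The grow direction rides in the cow
 * argument, as noted above; only init_ssize is mapped up front and
 * vm_map_growstack() extends the mapping on demand.
 */
#if 0
static int
example_create_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize)
{

	return (vm_map_stack(map, addrbos, max_ssize, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN));
}
#endif
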
3442static int stack_guard_page = 0;
3443TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3444SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3445    &stack_guard_page, 0,
3446    "Insert stack guard page ahead of the growable segments.");
3447
3448/*
3449 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3450 * desired address is already mapped or the stack was grown, and
3451 * also if addr is outside the stack range (strange, but preserves
3452 * compatibility with the grow function in vm_machdep.c).
3453 */
3454int
3455vm_map_growstack(struct proc *p, vm_offset_t addr)
3456{
3457	vm_map_entry_t next_entry, prev_entry;
3458	vm_map_entry_t new_entry, stack_entry;
3459	struct vmspace *vm = p->p_vmspace;
3460	vm_map_t map = &vm->vm_map;
3461	vm_offset_t end;
3462	vm_size_t growsize;
3463	size_t grow_amount, max_grow;
3464	rlim_t lmemlim, stacklim, vmemlim;
3465	int is_procstack, rv;
3466	struct ucred *cred;
3467#ifdef notyet
3468	uint64_t limit;
3469#endif
3470#ifdef RACCT
3471	int error;
3472#endif
3473
3474Retry:
3475	PROC_LOCK(p);
3476	lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3477	stacklim = lim_cur(p, RLIMIT_STACK);
3478	vmemlim = lim_cur(p, RLIMIT_VMEM);
3479	PROC_UNLOCK(p);
3480
3481	vm_map_lock_read(map);
3482
3483	/* If addr is already in the entry range, no need to grow. */
3484	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3485		vm_map_unlock_read(map);
3486		return (KERN_SUCCESS);
3487	}
3488
3489	next_entry = prev_entry->next;
3490	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3491		/*
3492		 * This entry does not grow upwards. Since the address lies
3493		 * beyond this entry, the next entry (if one exists) has to
3494		 * be a downward growable entry. The entry list header is
3495		 * never a growable entry, so it suffices to check the flags.
3496		 */
3497		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3498			vm_map_unlock_read(map);
3499			return (KERN_SUCCESS);
3500		}
3501		stack_entry = next_entry;
3502	} else {
3503		/*
3504		 * This entry grows upward. If the next entry does not at
3505		 * least grow downwards, this is the entry we need to grow.
3506		 * Otherwise, we have two possible choices and we have to
3507		 * select one.
3508		 */
3509		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3510			/*
3511			 * We have two choices; grow the entry closest to
3512			 * the address to minimize the amount of growth.
3513			 */
3514			if (addr - prev_entry->end <= next_entry->start - addr)
3515				stack_entry = prev_entry;
3516			else
3517				stack_entry = next_entry;
3518		} else
3519			stack_entry = prev_entry;
3520	}
3521
3522	if (stack_entry == next_entry) {
3523		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3524		KASSERT(addr < stack_entry->start, ("foo"));
3525		end = (prev_entry != &map->header) ? prev_entry->end :
3526		    stack_entry->start - stack_entry->avail_ssize;
3527		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3528		max_grow = stack_entry->start - end;
3529	} else {
3530		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3531		KASSERT(addr >= stack_entry->end, ("foo"));
3532		end = (next_entry != &map->header) ? next_entry->start :
3533		    stack_entry->end + stack_entry->avail_ssize;
3534		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3535		max_grow = end - stack_entry->end;
3536	}
3537
3538	if (grow_amount > stack_entry->avail_ssize) {
3539		vm_map_unlock_read(map);
3540		return (KERN_NO_SPACE);
3541	}
3542
3543	/*
3544	 * If there is no longer enough space between the entries, refuse
3545	 * and adjust the available space.  Note: this should only happen if the
3546	 * user has mapped into the stack area after the stack was created,
3547	 * and is probably an error.
3548	 *
3549	 * This also effectively destroys any guard page the user might have
3550	 * intended by limiting the stack size.
3551	 */
3552	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3553		if (vm_map_lock_upgrade(map))
3554			goto Retry;
3555
3556		stack_entry->avail_ssize = max_grow;
3557
3558		vm_map_unlock(map);
3559		return (KERN_NO_SPACE);
3560	}
3561
3562	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3563
3564	/*
3565	 * If this is the main process stack, see if we're over the stack
3566	 * limit.
3567	 */
3568	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3569		vm_map_unlock_read(map);
3570		return (KERN_NO_SPACE);
3571	}
3572#ifdef RACCT
3573	PROC_LOCK(p);
3574	if (is_procstack &&
3575	    racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3576		PROC_UNLOCK(p);
3577		vm_map_unlock_read(map);
3578		return (KERN_NO_SPACE);
3579	}
3580	PROC_UNLOCK(p);
3581#endif
3582
3583	/* Round up the grow amount modulo sgrowsiz */
3584	growsize = sgrowsiz;
3585	grow_amount = roundup(grow_amount, growsize);
3586	if (grow_amount > stack_entry->avail_ssize)
3587		grow_amount = stack_entry->avail_ssize;
3588	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3589		grow_amount = trunc_page((vm_size_t)stacklim) -
3590		    ctob(vm->vm_ssize);
3591	}
3592#ifdef notyet
3593	PROC_LOCK(p);
3594	limit = racct_get_available(p, RACCT_STACK);
3595	PROC_UNLOCK(p);
3596	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3597		grow_amount = limit - ctob(vm->vm_ssize);
3598#endif
3599	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3600		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3601			vm_map_unlock_read(map);
3602			rv = KERN_NO_SPACE;
3603			goto out;
3604		}
3605#ifdef RACCT
3606		PROC_LOCK(p);
3607		if (racct_set(p, RACCT_MEMLOCK,
3608		    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3609			PROC_UNLOCK(p);
3610			vm_map_unlock_read(map);
3611			rv = KERN_NO_SPACE;
3612			goto out;
3613		}
3614		PROC_UNLOCK(p);
3615#endif
3616	}
3617	/* If we would blow our VMEM resource limit, no go */
3618	if (map->size + grow_amount > vmemlim) {
3619		vm_map_unlock_read(map);
3620		rv = KERN_NO_SPACE;
3621		goto out;
3622	}
3623#ifdef RACCT
3624	PROC_LOCK(p);
3625	if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3626		PROC_UNLOCK(p);
3627		vm_map_unlock_read(map);
3628		rv = KERN_NO_SPACE;
3629		goto out;
3630	}
3631	PROC_UNLOCK(p);
3632#endif
3633
3634	if (vm_map_lock_upgrade(map))
3635		goto Retry;
3636
3637	if (stack_entry == next_entry) {
3638		/*
3639		 * Growing downward.
3640		 */
3641		/* Get the preliminary new entry start value */
3642		addr = stack_entry->start - grow_amount;
3643
3644		/*
3645		 * If this puts us into the previous entry, cut back our
3646		 * growth to the available space. Also, see the note above.
3647		 */
3648		if (addr < end) {
3649			stack_entry->avail_ssize = max_grow;
3650			addr = end;
3651			if (stack_guard_page)
3652				addr += PAGE_SIZE;
3653		}
3654
3655		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3656		    next_entry->protection, next_entry->max_protection, 0);
3657
3658		/* Adjust the available stack space by the amount we grew. */
3659		if (rv == KERN_SUCCESS) {
3660			if (prev_entry != &map->header)
3661				vm_map_clip_end(map, prev_entry, addr);
3662			new_entry = prev_entry->next;
3663			KASSERT(new_entry == stack_entry->prev, ("bad prev entry"));
3664			KASSERT(new_entry->end == stack_entry->start, ("bad entry end"));
3665			KASSERT(new_entry->start == addr, ("bad entry start"));
3666			grow_amount = new_entry->end - new_entry->start;
3667			new_entry->avail_ssize = stack_entry->avail_ssize -
3668			    grow_amount;
3669			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3670			new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3671		}
3672	} else {
3673		/*
3674		 * Growing upward.
3675		 */
3676		addr = stack_entry->end + grow_amount;
3677
3678		/*
3679		 * If this puts us into the next entry, cut back our growth
3680		 * to the available space. Also, see the note above.
3681		 */
3682		if (addr > end) {
3683			stack_entry->avail_ssize = end - stack_entry->end;
3684			addr = end;
3685			if (stack_guard_page)
3686				addr -= PAGE_SIZE;
3687		}
3688
3689		grow_amount = addr - stack_entry->end;
3690		cred = stack_entry->cred;
3691		if (cred == NULL && stack_entry->object.vm_object != NULL)
3692			cred = stack_entry->object.vm_object->cred;
3693		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3694			rv = KERN_NO_SPACE;
3695		/* Grow the underlying object if applicable. */
3696		else if (stack_entry->object.vm_object == NULL ||
3697			 vm_object_coalesce(stack_entry->object.vm_object,
3698			 stack_entry->offset,
3699			 (vm_size_t)(stack_entry->end - stack_entry->start),
3700			 (vm_size_t)grow_amount, cred != NULL)) {
3701			map->size += (addr - stack_entry->end);
3702			/* Update the current entry. */
3703			stack_entry->end = addr;
3704			stack_entry->avail_ssize -= grow_amount;
3705			vm_map_entry_resize_free(map, stack_entry);
3706			rv = KERN_SUCCESS;
3707
3708			if (next_entry != &map->header)
3709				vm_map_clip_start(map, next_entry, addr);
3710		} else
3711			rv = KERN_FAILURE;
3712	}
3713
3714	if (rv == KERN_SUCCESS && is_procstack)
3715		vm->vm_ssize += btoc(grow_amount);
3716
3717	vm_map_unlock(map);
3718
3719	/*
3720	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3721	 */
3722	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3723		vm_map_wire(map,
3724		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3725		    (stack_entry == next_entry) ? stack_entry->start : addr,
3726		    (p->p_flag & P_SYSTEM)
3727		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3728		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3729	}
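	/*
	 * Editor's note: the range wired above covers only the newly added
	 * span: [addr, stack_entry->start) when growing down, and
	 * [addr - grow_amount, addr) when growing up.
	 */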
3730
3731out:
3732#ifdef RACCT
3733	if (rv != KERN_SUCCESS) {
3734		PROC_LOCK(p);
3735		error = racct_set(p, RACCT_VMEM, map->size);
3736		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3737		if (!old_mlock) {
3738			error = racct_set(p, RACCT_MEMLOCK,
3739			    ptoa(pmap_wired_count(map->pmap)));
3740			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3741		}
3742		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3743		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3744		PROC_UNLOCK(p);
3745	}
3746#endif
3747
3748	return (rv);
3749}
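/*
 * Editor's note: a minimal sketch of how the grow path above is reached.
 * This assumes the fault retry loop in vm_fault() (vm_fault.c), which is
 * the canonical caller; the names shown are from that context:
 *
 *	if (growstack && result == KERN_INVALID_ADDRESS &&
 *	    map != kernel_map) {
 *		result = vm_map_growstack(curproc, vaddr);
 *		if (result != KERN_SUCCESS)
 *			return (KERN_FAILURE);
 *		growstack = FALSE;
 *		goto RetryFault;
 *	}
 */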
3750
3751/*
3752 * Unshare the specified VM space for exec.  A new, empty vmspace is
3753 * always created; the old one is released later by the exec code.
3754 */
3755int
3756vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3757{
3758	struct vmspace *oldvmspace = p->p_vmspace;
3759	struct vmspace *newvmspace;
3760
3761	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3762	    ("vmspace_exec recursed"));
3763	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3764	if (newvmspace == NULL)
3765		return (ENOMEM);
3766	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3767	/*
3768	 * This code is deliberately structured so that the vmspace is not
3769	 * torn down here; the other processes that are still using it run
3770	 * it down when they exit.  Even though there is little or no chance
3771	 * of blocking here, keeping this form makes future modification
3772	 * safer.
3773	 */
3774	PROC_VMSPACE_LOCK(p);
3775	p->p_vmspace = newvmspace;
3776	PROC_VMSPACE_UNLOCK(p);
3777	if (p == curthread->td_proc)
3778		pmap_activate(curthread);
3779	curthread->td_pflags |= TDP_EXECVMSPC;
3780	return (0);
3781}
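/*
 * Editor's note: TDP_EXECVMSPC signals the exec code that the old
 * vmspace must still be released once the new image is committed.  A
 * sketch of the expected caller-side cleanup (hedged; the real code
 * lives in kern_exec.c):
 *
 *	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
 *		td->td_pflags &= ~TDP_EXECVMSPC;
 *		vmspace_free(oldvmspace);
 *	}
 */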
3782
3783/*
3784 * Unshare the specified VM space for forcing COW.  This
3785 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3786 */
3787int
3788vmspace_unshare(struct proc *p)
3789{
3790	struct vmspace *oldvmspace = p->p_vmspace;
3791	struct vmspace *newvmspace;
3792	vm_ooffset_t fork_charge;
3793
3794	if (oldvmspace->vm_refcnt == 1)
3795		return (0);
3796	fork_charge = 0;
3797	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3798	if (newvmspace == NULL)
3799		return (ENOMEM);
3800	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3801		vmspace_free(newvmspace);
3802		return (ENOMEM);
3803	}
3804	PROC_VMSPACE_LOCK(p);
3805	p->p_vmspace = newvmspace;
3806	PROC_VMSPACE_UNLOCK(p);
3807	if (p == curthread->td_proc)
3808		pmap_activate(curthread);
3809	vmspace_free(oldvmspace);
3810	return (0);
3811}
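/*
 * Editor's note: fork_charge accumulates the swap reservation, in bytes,
 * that vmspace_fork() determined the private copy needs, e.g. for
 * now-unshared copy-on-write entries.  It is charged to the process
 * credential in one shot above; on failure the new vmspace is freed
 * before any state visible to the process has changed.
 */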
3812
3813/*
3814 *	vm_map_lookup:
3815 *
3816 *	Finds the VM object, offset, and
3817 *	protection for a given virtual address in the
3818 *	specified map, assuming a page fault of the
3819 *	type specified.
3820 *
3821 *	Leaves the map in question locked for read; return
3822 *	values are guaranteed until a vm_map_lookup_done
3823 *	call is performed.  Note that the map argument
3824 *	is in/out; the returned map must be used in
3825 *	the call to vm_map_lookup_done.
3826 *
3827 *	A handle (out_entry) is returned for use in
3828 *	vm_map_lookup_done, to make that fast.
3829 *
3830 *	If a lookup is requested with "write protection"
3831 *	specified, the map may be changed to perform virtual
3832 *	copying operations, although the data referenced will
3833 *	remain the same.
3834 */
3835int
3836vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3837	      vm_offset_t vaddr,
3838	      vm_prot_t fault_typea,
3839	      vm_map_entry_t *out_entry,	/* OUT */
3840	      vm_object_t *object,		/* OUT */
3841	      vm_pindex_t *pindex,		/* OUT */
3842	      vm_prot_t *out_prot,		/* OUT */
3843	      boolean_t *wired)			/* OUT */
3844{
3845	vm_map_entry_t entry;
3846	vm_map_t map = *var_map;
3847	vm_prot_t prot;
3848	vm_prot_t fault_type = fault_typea;
3849	vm_object_t eobject;
3850	vm_size_t size;
3851	struct ucred *cred;
3852
3853RetryLookup:;
3854
3855	vm_map_lock_read(map);
3856
3857	/*
3858	 * Lookup the faulting address.
3859	 */
3860	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3861		vm_map_unlock_read(map);
3862		return (KERN_INVALID_ADDRESS);
3863	}
3864
3865	entry = *out_entry;
3866
3867	/*
3868	 * Handle submaps.
3869	 */
3870	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3871		vm_map_t old_map = map;
3872
3873		*var_map = map = entry->object.sub_map;
3874		vm_map_unlock_read(old_map);
3875		goto RetryLookup;
3876	}
3877
3878	/*
3879	 * Check whether this task is allowed to have this page.
3880	 */
3881	prot = entry->protection;
3882	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3883	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3884		vm_map_unlock_read(map);
3885		return (KERN_PROTECTION_FAILURE);
3886	}
3887	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3888	    (entry->eflags & MAP_ENTRY_COW) &&
3889	    (fault_type & VM_PROT_WRITE)) {
3890		vm_map_unlock_read(map);
3891		return (KERN_PROTECTION_FAILURE);
3892	}
3893	if ((fault_typea & VM_PROT_COPY) != 0 &&
3894	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
3895	    (entry->eflags & MAP_ENTRY_COW) == 0) {
3896		vm_map_unlock_read(map);
3897		return (KERN_PROTECTION_FAILURE);
3898	}
3899
3900	/*
3901	 * If this page is not pageable, we have to get it for all possible
3902	 * accesses.
3903	 */
3904	*wired = (entry->wired_count != 0);
3905	if (*wired)
3906		fault_type = entry->protection;
3907	size = entry->end - entry->start;
3908	/*
3909	 * If the entry is copy-on-write, either shadow it now or demote access.
3910	 */
3911	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3912		/*
3913		 * If we want to write the page, we may as well handle that
3914		 * now since we've got the map locked.
3915		 *
3916		 * If we don't need to write the page, we just demote the
3917		 * permissions allowed.
3918		 */
3919		if ((fault_type & VM_PROT_WRITE) != 0 ||
3920		    (fault_typea & VM_PROT_COPY) != 0) {
3921			/*
3922			 * Make a new object, and place it in the object
3923			 * chain.  Note that no new references have appeared
3924			 * -- one just moved from the map to the new
3925			 * object.
3926			 */
3927			if (vm_map_lock_upgrade(map))
3928				goto RetryLookup;
3929
3930			if (entry->cred == NULL) {
3931				/*
3932				 * The debugger owner is charged for
3933				 * the memory.
3934				 */
3935				cred = curthread->td_ucred;
3936				crhold(cred);
3937				if (!swap_reserve_by_cred(size, cred)) {
3938					crfree(cred);
3939					vm_map_unlock(map);
3940					return (KERN_RESOURCE_SHORTAGE);
3941				}
3942				entry->cred = cred;
3943			}
3944			vm_object_shadow(&entry->object.vm_object,
3945			    &entry->offset, size);
3946			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3947			eobject = entry->object.vm_object;
3948			if (eobject->cred != NULL) {
3949				/*
3950				 * No shadow was created; drop the entry's charge.
3951				 */
3952				swap_release_by_cred(size, entry->cred);
3953				crfree(entry->cred);
3954				entry->cred = NULL;
3955			} else if (entry->cred != NULL) {
3956				VM_OBJECT_WLOCK(eobject);
3957				eobject->cred = entry->cred;
3958				eobject->charge = size;
3959				VM_OBJECT_WUNLOCK(eobject);
3960				entry->cred = NULL;
3961			}
3962
3963			vm_map_lock_downgrade(map);
3964		} else {
3965			/*
3966			 * We're attempting to read a copy-on-write page --
3967			 * don't allow writes.
3968			 */
3969			prot &= ~VM_PROT_WRITE;
3970		}
3971	}
3972
3973	/*
3974	 * Create an object if necessary.
3975	 */
3976	if (entry->object.vm_object == NULL &&
3977	    !map->system_map) {
3978		if (vm_map_lock_upgrade(map))
3979			goto RetryLookup;
3980		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3981		    atop(size));
3982		entry->offset = 0;
3983		if (entry->cred != NULL) {
3984			VM_OBJECT_WLOCK(entry->object.vm_object);
3985			entry->object.vm_object->cred = entry->cred;
3986			entry->object.vm_object->charge = size;
3987			VM_OBJECT_WUNLOCK(entry->object.vm_object);
3988			entry->cred = NULL;
3989		}
3990		vm_map_lock_downgrade(map);
3991	}
3992
3993	/*
3994	 * Return the object/offset from this entry.  If the entry was
3995	 * copy-on-write or empty, it has been fixed up.
3996	 */
3997	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3998	*object = entry->object.vm_object;
3999
4000	*out_prot = prot;
4001	return (KERN_SUCCESS);
4002}
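/*
 * Editor's note: a minimal sketch of the intended calling sequence
 * (hedged; vm_fault() is the canonical caller).  The map argument is
 * in/out, and the map returned through it is the one that must be
 * passed to vm_map_lookup_done():
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... fault in the page at (object, pindex) ...
 *	vm_map_lookup_done(map, entry);
 */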
4003
4004/*
4005 *	vm_map_lookup_locked:
4006 *
4007 *	Lookup the faulting address.  A version of vm_map_lookup that returns
4008 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
4009 */
4010int
4011vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4012		     vm_offset_t vaddr,
4013		     vm_prot_t fault_typea,
4014		     vm_map_entry_t *out_entry,	/* OUT */
4015		     vm_object_t *object,	/* OUT */
4016		     vm_pindex_t *pindex,	/* OUT */
4017		     vm_prot_t *out_prot,	/* OUT */
4018		     boolean_t *wired)		/* OUT */
4019{
4020	vm_map_entry_t entry;
4021	vm_map_t map = *var_map;
4022	vm_prot_t prot;
4023	vm_prot_t fault_type = fault_typea;
4024
4025	/*
4026	 * Lookup the faulting address.
4027	 */
4028	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4029		return (KERN_INVALID_ADDRESS);
4030
4031	entry = *out_entry;
4032
4033	/*
4034	 * Fail if the entry refers to a submap.
4035	 */
4036	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4037		return (KERN_FAILURE);
4038
4039	/*
4040	 * Check whether this task is allowed to have this page.
4041	 */
4042	prot = entry->protection;
4043	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4044	if ((fault_type & prot) != fault_type)
4045		return (KERN_PROTECTION_FAILURE);
4046	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4047	    (entry->eflags & MAP_ENTRY_COW) &&
4048	    (fault_type & VM_PROT_WRITE))
4049		return (KERN_PROTECTION_FAILURE);
4050
4051	/*
4052	 * If this page is not pageable, we have to get it for all possible
4053	 * accesses.
4054	 */
4055	*wired = (entry->wired_count != 0);
4056	if (*wired)
4057		fault_type = entry->protection;
4058
4059	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4060		/*
4061		 * Fail if the entry was copy-on-write for a write fault.
4062		 */
4063		if (fault_type & VM_PROT_WRITE)
4064			return (KERN_FAILURE);
4065		/*
4066		 * We're attempting to read a copy-on-write page --
4067		 * don't allow writes.
4068		 */
4069		prot &= ~VM_PROT_WRITE;
4070	}
4071
4072	/*
4073	 * Fail if an object should be created.
4074	 */
4075	if (entry->object.vm_object == NULL && !map->system_map)
4076		return (KERN_FAILURE);
4077
4078	/*
4079	 * Return the object/offset from this entry.  If the entry was
4080	 * copy-on-write or empty, it has been fixed up.
4081	 */
4082	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4083	*object = entry->object.vm_object;
4084
4085	*out_prot = prot;
4086	return (KERN_SUCCESS);
4087}
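/*
 * Editor's note: unlike vm_map_lookup(), this variant never acquires or
 * upgrades the map lock -- the caller must already hold at least a read
 * lock on *var_map -- and it returns KERN_FAILURE for any case that
 * would require sleeping or allocating: submaps, write faults on
 * copy-on-write entries, and entries with no backing object.
 */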
4088
4089/*
4090 *	vm_map_lookup_done:
4091 *
4092 *	Releases locks acquired by a vm_map_lookup
4093 *	(according to the handle returned by that lookup).
4094 */
4095void
4096vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4097{
4098	/*
4099	 * Unlock the main-level map
4100	 */
4101	vm_map_unlock_read(map);
4102}
4103
4104#include "opt_ddb.h"
4105#ifdef DDB
4106#include <sys/kernel.h>
4107
4108#include <ddb/ddb.h>
4109
4110static void
4111vm_map_print(vm_map_t map)
4112{
4113	vm_map_entry_t entry;
4114
4115	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4116	    (void *)map,
4117	    (void *)map->pmap, map->nentries, map->timestamp);
4118
4119	db_indent += 2;
4120	for (entry = map->header.next; entry != &map->header;
4121	    entry = entry->next) {
4122		db_iprintf("map entry %p: start=%p, end=%p\n",
4123		    (void *)entry, (void *)entry->start, (void *)entry->end);
4124		{
4125			static char *inheritance_name[4] =
4126			{"share", "copy", "none", "donate_copy"};
4127
4128			db_iprintf(" prot=%x/%x/%s",
4129			    entry->protection,
4130			    entry->max_protection,
4131			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4132			if (entry->wired_count != 0)
4133				db_printf(", wired");
4134		}
4135		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4136			db_printf(", share=%p, offset=0x%jx\n",
4137			    (void *)entry->object.sub_map,
4138			    (uintmax_t)entry->offset);
4139			if ((entry->prev == &map->header) ||
4140			    (entry->prev->object.sub_map !=
4141				entry->object.sub_map)) {
4142				db_indent += 2;
4143				vm_map_print((vm_map_t)entry->object.sub_map);
4144				db_indent -= 2;
4145			}
4146		} else {
4147			if (entry->cred != NULL)
4148				db_printf(", ruid %d", entry->cred->cr_ruid);
4149			db_printf(", object=%p, offset=0x%jx",
4150			    (void *)entry->object.vm_object,
4151			    (uintmax_t)entry->offset);
4152			if (entry->object.vm_object && entry->object.vm_object->cred)
4153				db_printf(", obj ruid %d charge %jx",
4154				    entry->object.vm_object->cred->cr_ruid,
4155				    (uintmax_t)entry->object.vm_object->charge);
4156			if (entry->eflags & MAP_ENTRY_COW)
4157				db_printf(", copy (%s)",
4158				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4159			db_printf("\n");
4160
4161			if ((entry->prev == &map->header) ||
4162			    (entry->prev->object.vm_object !=
4163				entry->object.vm_object)) {
4164				db_indent += 2;
4165				vm_object_print((db_expr_t)(intptr_t)
4166						entry->object.vm_object,
4167						0, 0, (char *)0);
4168				db_indent -= 2;
4169			}
4170		}
4171	}
4172	db_indent -= 2;
4173}
4174
4175DB_SHOW_COMMAND(map, map)
4176{
4177
4178	if (!have_addr) {
4179		db_printf("usage: show map <addr>\n");
4180		return;
4181	}
4182	vm_map_print((vm_map_t)addr);
4183}
4184
4185DB_SHOW_COMMAND(procvm, procvm)
4186{
4187	struct proc *p;
4188
4189	if (have_addr) {
4190		p = (struct proc *) addr;
4191	} else {
4192		p = curproc;
4193	}
4194
4195	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4196	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4197	    (void *)vmspace_pmap(p->p_vmspace));
4198
4199	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4200}
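/*
 * Editor's note: illustrative DDB usage of the two commands above (the
 * address shown is hypothetical):
 *
 *	db> show map 0xfffff80002c69000
 *	db> show procvm			(defaults to curproc)
 */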
4201
4202#endif /* DDB */
4203