vm_map.c revision 326189
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/vm_map.c 326189 2017-11-25 14:51:40Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
272336809Sdim */ 273336809Sdimstruct vmspace * 274336809Sdimvmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) 275336809Sdim{ 276336809Sdim struct vmspace *vm; 277336809Sdim 278336809Sdim vm = uma_zalloc(vmspace_zone, M_WAITOK); 279336809Sdim 280336809Sdim KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); 281336809Sdim 282336809Sdim if (pinit == NULL) 283336809Sdim pinit = &pmap_pinit; 284336809Sdim 285336809Sdim if (!pinit(vmspace_pmap(vm))) { 286336809Sdim uma_zfree(vmspace_zone, vm); 287336809Sdim return (NULL); 288336809Sdim } 289336809Sdim CTR1(KTR_VM, "vmspace_alloc: %p", vm); 290336809Sdim _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); 291336809Sdim vm->vm_refcnt = 1; 292336809Sdim vm->vm_shm = NULL; 293336809Sdim vm->vm_swrss = 0; 294336809Sdim vm->vm_tsize = 0; 295336809Sdim vm->vm_dsize = 0; 296336809Sdim vm->vm_ssize = 0; 297336809Sdim vm->vm_taddr = 0; 298336809Sdim vm->vm_daddr = 0; 299336809Sdim vm->vm_maxsaddr = 0; 300336809Sdim return (vm); 301336809Sdim} 302336809Sdim 303336809Sdim#ifdef RACCT 304336809Sdimstatic void 305336809Sdimvmspace_container_reset(struct proc *p) 306336809Sdim{ 307336809Sdim 308336809Sdim PROC_LOCK(p); 309336809Sdim racct_set(p, RACCT_DATA, 0); 310336809Sdim racct_set(p, RACCT_STACK, 0); 311336809Sdim racct_set(p, RACCT_RSS, 0); 312336809Sdim racct_set(p, RACCT_MEMLOCK, 0); 313336809Sdim racct_set(p, RACCT_VMEM, 0); 314336809Sdim PROC_UNLOCK(p); 315336809Sdim} 316336809Sdim#endif 317336809Sdim 318336809Sdimstatic inline void 319336809Sdimvmspace_dofree(struct vmspace *vm) 320336809Sdim{ 321336809Sdim 322336809Sdim CTR1(KTR_VM, "vmspace_free: %p", vm); 323336809Sdim 324336809Sdim /* 325336809Sdim * Make sure any SysV shm is freed, it might not have been in 326336809Sdim * exit1(). 327336809Sdim */ 328336809Sdim shmexit(vm); 329336809Sdim 330336809Sdim /* 331336809Sdim * Lock the map, to wait out all other references to it. 332336809Sdim * Delete all of the mappings and pages they hold, then call 333336809Sdim * the pmap module to reclaim anything left. 334336809Sdim */ 335336809Sdim (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset, 336336809Sdim vm->vm_map.max_offset); 337336809Sdim 338336809Sdim pmap_release(vmspace_pmap(vm)); 339336809Sdim vm->vm_map.pmap = NULL; 340353358Sdim uma_zfree(vmspace_zone, vm); 341353358Sdim} 342353358Sdim 343353358Sdimvoid 344336809Sdimvmspace_free(struct vmspace *vm) 345336809Sdim{ 346336809Sdim 347336809Sdim WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 348336809Sdim "vmspace_free() called with non-sleepable lock held"); 349336809Sdim 350336809Sdim if (vm->vm_refcnt == 0) 351336809Sdim panic("vmspace_free: attempt to free already freed vmspace"); 352336809Sdim 353336809Sdim if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) 354336809Sdim vmspace_dofree(vm); 355336809Sdim} 356336809Sdim 357336809Sdimvoid 358336809Sdimvmspace_exitfree(struct proc *p) 359336809Sdim{ 360336809Sdim struct vmspace *vm; 361336809Sdim 362336809Sdim PROC_VMSPACE_LOCK(p); 363336809Sdim vm = p->p_vmspace; 364336809Sdim p->p_vmspace = NULL; 365336809Sdim PROC_VMSPACE_UNLOCK(p); 366336809Sdim KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 367336809Sdim vmspace_free(vm); 368336809Sdim} 369336809Sdim 370336809Sdimvoid 371336809Sdimvmspace_exit(struct thread *td) 372336809Sdim{ 373336809Sdim int refcnt; 374336809Sdim struct vmspace *vm; 375336809Sdim struct proc *p; 376336809Sdim 377336809Sdim /* 378336809Sdim * Release user portion of address space. 
379336809Sdim * This releases references to vnodes, 380336809Sdim * which could cause I/O if the file has been unlinked. 381336809Sdim * Need to do this early enough that we can still sleep. 382336809Sdim * 383336809Sdim * The last exiting process to reach this point releases as 384336809Sdim * much of the environment as it can. vmspace_dofree() is the 385336809Sdim * slower fallback in case another process had a temporary 386336809Sdim * reference to the vmspace. 387336809Sdim */ 388336809Sdim 389336809Sdim p = td->td_proc; 390336809Sdim vm = p->p_vmspace; 391336809Sdim atomic_add_int(&vmspace0.vm_refcnt, 1); 392336809Sdim do { 393336809Sdim refcnt = vm->vm_refcnt; 394336809Sdim if (refcnt > 1 && p->p_vmspace != &vmspace0) { 395336809Sdim /* Switch now since other proc might free vmspace */ 396336809Sdim PROC_VMSPACE_LOCK(p); 397336809Sdim p->p_vmspace = &vmspace0; 398336809Sdim PROC_VMSPACE_UNLOCK(p); 399336809Sdim pmap_activate(td); 400336809Sdim } 401336809Sdim } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 402336809Sdim if (refcnt == 1) { 403336809Sdim if (p->p_vmspace != vm) { 404336809Sdim /* vmspace not yet freed, switch back */ 405336809Sdim PROC_VMSPACE_LOCK(p); 406336809Sdim p->p_vmspace = vm; 407336809Sdim PROC_VMSPACE_UNLOCK(p); 408336809Sdim pmap_activate(td); 409336809Sdim } 410336809Sdim pmap_remove_pages(vmspace_pmap(vm)); 411336809Sdim /* Switch now since this proc will free vmspace */ 412336809Sdim PROC_VMSPACE_LOCK(p); 413336809Sdim p->p_vmspace = &vmspace0; 414336809Sdim PROC_VMSPACE_UNLOCK(p); 415336809Sdim pmap_activate(td); 416336809Sdim vmspace_dofree(vm); 417336809Sdim } 418336809Sdim#ifdef RACCT 419336809Sdim if (racct_enable) 420336809Sdim vmspace_container_reset(p); 421336809Sdim#endif 422336809Sdim} 423336809Sdim 424336809Sdim/* Acquire reference to vmspace owned by another process. 
*/ 425336809Sdim 426336809Sdimstruct vmspace * 427336809Sdimvmspace_acquire_ref(struct proc *p) 428336809Sdim{ 429336809Sdim struct vmspace *vm; 430336809Sdim int refcnt; 431336809Sdim 432336809Sdim PROC_VMSPACE_LOCK(p); 433336809Sdim vm = p->p_vmspace; 434336809Sdim if (vm == NULL) { 435336809Sdim PROC_VMSPACE_UNLOCK(p); 436336809Sdim return (NULL); 437336809Sdim } 438336809Sdim do { 439336809Sdim refcnt = vm->vm_refcnt; 440336809Sdim if (refcnt <= 0) { /* Avoid 0->1 transition */ 441336809Sdim PROC_VMSPACE_UNLOCK(p); 442336809Sdim return (NULL); 443336809Sdim } 444336809Sdim } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1)); 445336809Sdim if (vm != p->p_vmspace) { 446336809Sdim PROC_VMSPACE_UNLOCK(p); 447336809Sdim vmspace_free(vm); 448336809Sdim return (NULL); 449336809Sdim } 450336809Sdim PROC_VMSPACE_UNLOCK(p); 451336809Sdim return (vm); 452336809Sdim} 453336809Sdim 454336809Sdimvoid 455336809Sdim_vm_map_lock(vm_map_t map, const char *file, int line) 456336809Sdim{ 457336809Sdim 458336809Sdim if (map->system_map) 459336809Sdim mtx_lock_flags_(&map->system_mtx, 0, file, line); 460336809Sdim else 461336809Sdim sx_xlock_(&map->lock, file, line); 462336809Sdim map->timestamp++; 463336809Sdim} 464336809Sdim 465336809Sdimstatic void 466336809Sdimvm_map_process_deferred(void) 467336809Sdim{ 468336809Sdim struct thread *td; 469336809Sdim vm_map_entry_t entry, next; 470336809Sdim vm_object_t object; 471336809Sdim 472336809Sdim td = curthread; 473 entry = td->td_map_def_user; 474 td->td_map_def_user = NULL; 475 while (entry != NULL) { 476 next = entry->next; 477 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) { 478 /* 479 * Decrement the object's writemappings and 480 * possibly the vnode's v_writecount. 481 */ 482 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 483 ("Submap with writecount")); 484 object = entry->object.vm_object; 485 KASSERT(object != NULL, ("No object for writecount")); 486 vnode_pager_release_writecount(object, entry->start, 487 entry->end); 488 } 489 vm_map_entry_deallocate(entry, FALSE); 490 entry = next; 491 } 492} 493 494void 495_vm_map_unlock(vm_map_t map, const char *file, int line) 496{ 497 498 if (map->system_map) 499 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 500 else { 501 sx_xunlock_(&map->lock, file, line); 502 vm_map_process_deferred(); 503 } 504} 505 506void 507_vm_map_lock_read(vm_map_t map, const char *file, int line) 508{ 509 510 if (map->system_map) 511 mtx_lock_flags_(&map->system_mtx, 0, file, line); 512 else 513 sx_slock_(&map->lock, file, line); 514} 515 516void 517_vm_map_unlock_read(vm_map_t map, const char *file, int line) 518{ 519 520 if (map->system_map) 521 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 522 else { 523 sx_sunlock_(&map->lock, file, line); 524 vm_map_process_deferred(); 525 } 526} 527 528int 529_vm_map_trylock(vm_map_t map, const char *file, int line) 530{ 531 int error; 532 533 error = map->system_map ? 534 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 535 !sx_try_xlock_(&map->lock, file, line); 536 if (error == 0) 537 map->timestamp++; 538 return (error == 0); 539} 540 541int 542_vm_map_trylock_read(vm_map_t map, const char *file, int line) 543{ 544 int error; 545 546 error = map->system_map ? 
547 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 548 !sx_try_slock_(&map->lock, file, line); 549 return (error == 0); 550} 551 552/* 553 * _vm_map_lock_upgrade: [ internal use only ] 554 * 555 * Tries to upgrade a read (shared) lock on the specified map to a write 556 * (exclusive) lock. Returns the value "0" if the upgrade succeeds and a 557 * non-zero value if the upgrade fails. If the upgrade fails, the map is 558 * returned without a read or write lock held. 559 * 560 * Requires that the map be read locked. 561 */ 562int 563_vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 564{ 565 unsigned int last_timestamp; 566 567 if (map->system_map) { 568 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 569 } else { 570 if (!sx_try_upgrade_(&map->lock, file, line)) { 571 last_timestamp = map->timestamp; 572 sx_sunlock_(&map->lock, file, line); 573 vm_map_process_deferred(); 574 /* 575 * If the map's timestamp does not change while the 576 * map is unlocked, then the upgrade succeeds. 577 */ 578 sx_xlock_(&map->lock, file, line); 579 if (last_timestamp != map->timestamp) { 580 sx_xunlock_(&map->lock, file, line); 581 return (1); 582 } 583 } 584 } 585 map->timestamp++; 586 return (0); 587} 588 589void 590_vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 591{ 592 593 if (map->system_map) { 594 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 595 } else 596 sx_downgrade_(&map->lock, file, line); 597} 598 599/* 600 * vm_map_locked: 601 * 602 * Returns a non-zero value if the caller holds a write (exclusive) lock 603 * on the specified map and the value "0" otherwise. 604 */ 605int 606vm_map_locked(vm_map_t map) 607{ 608 609 if (map->system_map) 610 return (mtx_owned(&map->system_mtx)); 611 else 612 return (sx_xlocked(&map->lock)); 613} 614 615#ifdef INVARIANTS 616static void 617_vm_map_assert_locked(vm_map_t map, const char *file, int line) 618{ 619 620 if (map->system_map) 621 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 622 else 623 sx_assert_(&map->lock, SA_XLOCKED, file, line); 624} 625 626#define VM_MAP_ASSERT_LOCKED(map) \ 627 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) 628#else 629#define VM_MAP_ASSERT_LOCKED(map) 630#endif 631 632/* 633 * _vm_map_unlock_and_wait: 634 * 635 * Atomically releases the lock on the specified map and puts the calling 636 * thread to sleep. The calling thread will remain asleep until either 637 * vm_map_wakeup() is performed on the map or the specified timeout is 638 * exceeded. 639 * 640 * WARNING! This function does not perform deferred deallocations of 641 * objects and map entries. Therefore, the calling thread is expected to 642 * reacquire the map lock after reawakening and later perform an ordinary 643 * unlock operation, such as vm_map_unlock(), before completing its 644 * operation on the map. 645 */ 646int 647_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) 648{ 649 650 mtx_lock(&map_sleep_mtx); 651 if (map->system_map) 652 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 653 else 654 sx_xunlock_(&map->lock, file, line); 655 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 656 timo)); 657} 658 659/* 660 * vm_map_wakeup: 661 * 662 * Awaken any threads that have slept on the map using 663 * vm_map_unlock_and_wait(). 664 */ 665void 666vm_map_wakeup(vm_map_t map) 667{ 668 669 /* 670 * Acquire and release map_sleep_mtx to prevent a wakeup() 671 * from being performed (and lost) between the map unlock 672 * and the msleep() in _vm_map_unlock_and_wait(). 
673 */ 674 mtx_lock(&map_sleep_mtx); 675 mtx_unlock(&map_sleep_mtx); 676 wakeup(&map->root); 677} 678 679void 680vm_map_busy(vm_map_t map) 681{ 682 683 VM_MAP_ASSERT_LOCKED(map); 684 map->busy++; 685} 686 687void 688vm_map_unbusy(vm_map_t map) 689{ 690 691 VM_MAP_ASSERT_LOCKED(map); 692 KASSERT(map->busy, ("vm_map_unbusy: not busy")); 693 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { 694 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); 695 wakeup(&map->busy); 696 } 697} 698 699void 700vm_map_wait_busy(vm_map_t map) 701{ 702 703 VM_MAP_ASSERT_LOCKED(map); 704 while (map->busy) { 705 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); 706 if (map->system_map) 707 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); 708 else 709 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); 710 } 711 map->timestamp++; 712} 713 714long 715vmspace_resident_count(struct vmspace *vmspace) 716{ 717 return pmap_resident_count(vmspace_pmap(vmspace)); 718} 719 720/* 721 * vm_map_create: 722 * 723 * Creates and returns a new empty VM map with 724 * the given physical map structure, and having 725 * the given lower and upper address bounds. 726 */ 727vm_map_t 728vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 729{ 730 vm_map_t result; 731 732 result = uma_zalloc(mapzone, M_WAITOK); 733 CTR1(KTR_VM, "vm_map_create: %p", result); 734 _vm_map_init(result, pmap, min, max); 735 return (result); 736} 737 738/* 739 * Initialize an existing vm_map structure 740 * such as that in the vmspace structure. 741 */ 742static void 743_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 744{ 745 746 map->header.next = map->header.prev = &map->header; 747 map->needs_wakeup = FALSE; 748 map->system_map = 0; 749 map->pmap = pmap; 750 map->min_offset = min; 751 map->max_offset = max; 752 map->flags = 0; 753 map->root = NULL; 754 map->timestamp = 0; 755 map->busy = 0; 756} 757 758void 759vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 760{ 761 762 _vm_map_init(map, pmap, min, max); 763 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 764 sx_init(&map->lock, "user map"); 765} 766 767/* 768 * vm_map_entry_dispose: [ internal use only ] 769 * 770 * Inverse of vm_map_entry_create. 771 */ 772static void 773vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 774{ 775 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 776} 777 778/* 779 * vm_map_entry_create: [ internal use only ] 780 * 781 * Allocates a VM map entry for insertion. 782 * No entry fields are filled in. 783 */ 784static vm_map_entry_t 785vm_map_entry_create(vm_map_t map) 786{ 787 vm_map_entry_t new_entry; 788 789 if (map->system_map) 790 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 791 else 792 new_entry = uma_zalloc(mapentzone, M_WAITOK); 793 if (new_entry == NULL) 794 panic("vm_map_entry_create: kernel resources exhausted"); 795 return (new_entry); 796} 797 798/* 799 * vm_map_entry_set_behavior: 800 * 801 * Set the expected access behavior, either normal, random, or 802 * sequential. 803 */ 804static inline void 805vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 806{ 807 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 808 (behavior & MAP_ENTRY_BEHAV_MASK); 809} 810 811/* 812 * vm_map_entry_set_max_free: 813 * 814 * Set the max_free field in a vm_map_entry. 
815 */ 816static inline void 817vm_map_entry_set_max_free(vm_map_entry_t entry) 818{ 819 820 entry->max_free = entry->adj_free; 821 if (entry->left != NULL && entry->left->max_free > entry->max_free) 822 entry->max_free = entry->left->max_free; 823 if (entry->right != NULL && entry->right->max_free > entry->max_free) 824 entry->max_free = entry->right->max_free; 825} 826 827/* 828 * vm_map_entry_splay: 829 * 830 * The Sleator and Tarjan top-down splay algorithm with the 831 * following variation. Max_free must be computed bottom-up, so 832 * on the downward pass, maintain the left and right spines in 833 * reverse order. Then, make a second pass up each side to fix 834 * the pointers and compute max_free. The time bound is O(log n) 835 * amortized. 836 * 837 * The new root is the vm_map_entry containing "addr", or else an 838 * adjacent entry (lower or higher) if addr is not in the tree. 839 * 840 * The map must be locked, and leaves it so. 841 * 842 * Returns: the new root. 843 */ 844static vm_map_entry_t 845vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root) 846{ 847 vm_map_entry_t llist, rlist; 848 vm_map_entry_t ltree, rtree; 849 vm_map_entry_t y; 850 851 /* Special case of empty tree. */ 852 if (root == NULL) 853 return (root); 854 855 /* 856 * Pass One: Splay down the tree until we find addr or a NULL 857 * pointer where addr would go. llist and rlist are the two 858 * sides in reverse order (bottom-up), with llist linked by 859 * the right pointer and rlist linked by the left pointer in 860 * the vm_map_entry. Wait until Pass Two to set max_free on 861 * the two spines. 862 */ 863 llist = NULL; 864 rlist = NULL; 865 for (;;) { 866 /* root is never NULL in here. */ 867 if (addr < root->start) { 868 y = root->left; 869 if (y == NULL) 870 break; 871 if (addr < y->start && y->left != NULL) { 872 /* Rotate right and put y on rlist. */ 873 root->left = y->right; 874 y->right = root; 875 vm_map_entry_set_max_free(root); 876 root = y->left; 877 y->left = rlist; 878 rlist = y; 879 } else { 880 /* Put root on rlist. */ 881 root->left = rlist; 882 rlist = root; 883 root = y; 884 } 885 } else if (addr >= root->end) { 886 y = root->right; 887 if (y == NULL) 888 break; 889 if (addr >= y->end && y->right != NULL) { 890 /* Rotate left and put y on llist. */ 891 root->right = y->left; 892 y->left = root; 893 vm_map_entry_set_max_free(root); 894 root = y->right; 895 y->right = llist; 896 llist = y; 897 } else { 898 /* Put root on llist. */ 899 root->right = llist; 900 llist = root; 901 root = y; 902 } 903 } else 904 break; 905 } 906 907 /* 908 * Pass Two: Walk back up the two spines, flip the pointers 909 * and set max_free. The subtrees of the root go at the 910 * bottom of llist and rlist. 911 */ 912 ltree = root->left; 913 while (llist != NULL) { 914 y = llist->right; 915 llist->right = ltree; 916 vm_map_entry_set_max_free(llist); 917 ltree = llist; 918 llist = y; 919 } 920 rtree = root->right; 921 while (rlist != NULL) { 922 y = rlist->left; 923 rlist->left = rtree; 924 vm_map_entry_set_max_free(rlist); 925 rtree = rlist; 926 rlist = y; 927 } 928 929 /* 930 * Final assembly: add ltree and rtree as subtrees of root. 931 */ 932 root->left = ltree; 933 root->right = rtree; 934 vm_map_entry_set_max_free(root); 935 936 return (root); 937} 938 939/* 940 * vm_map_entry_{un,}link: 941 * 942 * Insert/remove entries from maps. 
943 */ 944static void 945vm_map_entry_link(vm_map_t map, 946 vm_map_entry_t after_where, 947 vm_map_entry_t entry) 948{ 949 950 CTR4(KTR_VM, 951 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 952 map->nentries, entry, after_where); 953 VM_MAP_ASSERT_LOCKED(map); 954 KASSERT(after_where == &map->header || 955 after_where->end <= entry->start, 956 ("vm_map_entry_link: prev end %jx new start %jx overlap", 957 (uintmax_t)after_where->end, (uintmax_t)entry->start)); 958 KASSERT(after_where->next == &map->header || 959 entry->end <= after_where->next->start, 960 ("vm_map_entry_link: new end %jx next start %jx overlap", 961 (uintmax_t)entry->end, (uintmax_t)after_where->next->start)); 962 963 map->nentries++; 964 entry->prev = after_where; 965 entry->next = after_where->next; 966 entry->next->prev = entry; 967 after_where->next = entry; 968 969 if (after_where != &map->header) { 970 if (after_where != map->root) 971 vm_map_entry_splay(after_where->start, map->root); 972 entry->right = after_where->right; 973 entry->left = after_where; 974 after_where->right = NULL; 975 after_where->adj_free = entry->start - after_where->end; 976 vm_map_entry_set_max_free(after_where); 977 } else { 978 entry->right = map->root; 979 entry->left = NULL; 980 } 981 entry->adj_free = (entry->next == &map->header ? map->max_offset : 982 entry->next->start) - entry->end; 983 vm_map_entry_set_max_free(entry); 984 map->root = entry; 985} 986 987static void 988vm_map_entry_unlink(vm_map_t map, 989 vm_map_entry_t entry) 990{ 991 vm_map_entry_t next, prev, root; 992 993 VM_MAP_ASSERT_LOCKED(map); 994 if (entry != map->root) 995 vm_map_entry_splay(entry->start, map->root); 996 if (entry->left == NULL) 997 root = entry->right; 998 else { 999 root = vm_map_entry_splay(entry->start, entry->left); 1000 root->right = entry->right; 1001 root->adj_free = (entry->next == &map->header ? map->max_offset : 1002 entry->next->start) - root->end; 1003 vm_map_entry_set_max_free(root); 1004 } 1005 map->root = root; 1006 1007 prev = entry->prev; 1008 next = entry->next; 1009 next->prev = prev; 1010 prev->next = next; 1011 map->nentries--; 1012 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 1013 map->nentries, entry); 1014} 1015 1016/* 1017 * vm_map_entry_resize_free: 1018 * 1019 * Recompute the amount of free space following a vm_map_entry 1020 * and propagate that value up the tree. Call this function after 1021 * resizing a map entry in-place, that is, without a call to 1022 * vm_map_entry_link() or _unlink(). 1023 * 1024 * The map must be locked, and leaves it so. 1025 */ 1026static void 1027vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry) 1028{ 1029 1030 /* 1031 * Using splay trees without parent pointers, propagating 1032 * max_free up the tree is done by moving the entry to the 1033 * root and making the change there. 1034 */ 1035 if (entry != map->root) 1036 map->root = vm_map_entry_splay(entry->start, map->root); 1037 1038 entry->adj_free = (entry->next == &map->header ? map->max_offset : 1039 entry->next->start) - entry->end; 1040 vm_map_entry_set_max_free(entry); 1041} 1042 1043/* 1044 * vm_map_lookup_entry: [ internal use only ] 1045 * 1046 * Finds the map entry containing (or 1047 * immediately preceding) the specified address 1048 * in the given map; the entry is returned 1049 * in the "entry" parameter. The boolean 1050 * result indicates whether the address is 1051 * actually contained in the map. 
1052 */ 1053boolean_t 1054vm_map_lookup_entry( 1055 vm_map_t map, 1056 vm_offset_t address, 1057 vm_map_entry_t *entry) /* OUT */ 1058{ 1059 vm_map_entry_t cur; 1060 boolean_t locked; 1061 1062 /* 1063 * If the map is empty, then the map entry immediately preceding 1064 * "address" is the map's header. 1065 */ 1066 cur = map->root; 1067 if (cur == NULL) 1068 *entry = &map->header; 1069 else if (address >= cur->start && cur->end > address) { 1070 *entry = cur; 1071 return (TRUE); 1072 } else if ((locked = vm_map_locked(map)) || 1073 sx_try_upgrade(&map->lock)) { 1074 /* 1075 * Splay requires a write lock on the map. However, it only 1076 * restructures the binary search tree; it does not otherwise 1077 * change the map. Thus, the map's timestamp need not change 1078 * on a temporary upgrade. 1079 */ 1080 map->root = cur = vm_map_entry_splay(address, cur); 1081 if (!locked) 1082 sx_downgrade(&map->lock); 1083 1084 /* 1085 * If "address" is contained within a map entry, the new root 1086 * is that map entry. Otherwise, the new root is a map entry 1087 * immediately before or after "address". 1088 */ 1089 if (address >= cur->start) { 1090 *entry = cur; 1091 if (cur->end > address) 1092 return (TRUE); 1093 } else 1094 *entry = cur->prev; 1095 } else 1096 /* 1097 * Since the map is only locked for read access, perform a 1098 * standard binary search tree lookup for "address". 1099 */ 1100 for (;;) { 1101 if (address < cur->start) { 1102 if (cur->left == NULL) { 1103 *entry = cur->prev; 1104 break; 1105 } 1106 cur = cur->left; 1107 } else if (cur->end > address) { 1108 *entry = cur; 1109 return (TRUE); 1110 } else { 1111 if (cur->right == NULL) { 1112 *entry = cur; 1113 break; 1114 } 1115 cur = cur->right; 1116 } 1117 } 1118 return (FALSE); 1119} 1120 1121/* 1122 * vm_map_insert: 1123 * 1124 * Inserts the given whole VM object into the target 1125 * map at the specified address range. The object's 1126 * size should match that of the address range. 1127 * 1128 * Requires that the map be locked, and leaves it so. 1129 * 1130 * If object is non-NULL, ref count must be bumped by caller 1131 * prior to making call to account for the new entry. 1132 */ 1133int 1134vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1135 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1136{ 1137 vm_map_entry_t new_entry, prev_entry, temp_entry; 1138 struct ucred *cred; 1139 vm_eflags_t protoeflags; 1140 vm_inherit_t inheritance; 1141 1142 VM_MAP_ASSERT_LOCKED(map); 1143 KASSERT((object != kmem_object && object != kernel_object) || 1144 (cow & MAP_COPY_ON_WRITE) == 0, 1145 ("vm_map_insert: kmem or kernel object and COW")); 1146 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0, 1147 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1148 KASSERT((prot & ~max) == 0, 1149 ("prot %#x is not subset of max_prot %#x", prot, max)); 1150 1151 /* 1152 * Check that the start and end points are not bogus. 1153 */ 1154 if (start < map->min_offset || end > map->max_offset || start >= end) 1155 return (KERN_INVALID_ADDRESS); 1156 1157 /* 1158 * Find the entry prior to the proposed starting address; if it's part 1159 * of an existing entry, this range is bogus. 1160 */ 1161 if (vm_map_lookup_entry(map, start, &temp_entry)) 1162 return (KERN_NO_SPACE); 1163 1164 prev_entry = temp_entry; 1165 1166 /* 1167 * Assert that the next entry doesn't overlap the end point. 
1168 */ 1169 if (prev_entry->next != &map->header && prev_entry->next->start < end) 1170 return (KERN_NO_SPACE); 1171 1172 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || 1173 max != VM_PROT_NONE)) 1174 return (KERN_INVALID_ARGUMENT); 1175 1176 protoeflags = 0; 1177 if (cow & MAP_COPY_ON_WRITE) 1178 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 1179 if (cow & MAP_NOFAULT) 1180 protoeflags |= MAP_ENTRY_NOFAULT; 1181 if (cow & MAP_DISABLE_SYNCER) 1182 protoeflags |= MAP_ENTRY_NOSYNC; 1183 if (cow & MAP_DISABLE_COREDUMP) 1184 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1185 if (cow & MAP_STACK_GROWS_DOWN) 1186 protoeflags |= MAP_ENTRY_GROWS_DOWN; 1187 if (cow & MAP_STACK_GROWS_UP) 1188 protoeflags |= MAP_ENTRY_GROWS_UP; 1189 if (cow & MAP_VN_WRITECOUNT) 1190 protoeflags |= MAP_ENTRY_VN_WRITECNT; 1191 if ((cow & MAP_CREATE_GUARD) != 0) 1192 protoeflags |= MAP_ENTRY_GUARD; 1193 if ((cow & MAP_CREATE_STACK_GAP_DN) != 0) 1194 protoeflags |= MAP_ENTRY_STACK_GAP_DN; 1195 if ((cow & MAP_CREATE_STACK_GAP_UP) != 0) 1196 protoeflags |= MAP_ENTRY_STACK_GAP_UP; 1197 if (cow & MAP_INHERIT_SHARE) 1198 inheritance = VM_INHERIT_SHARE; 1199 else 1200 inheritance = VM_INHERIT_DEFAULT; 1201 1202 cred = NULL; 1203 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0) 1204 goto charged; 1205 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 1206 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 1207 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 1208 return (KERN_RESOURCE_SHORTAGE); 1209 KASSERT(object == NULL || 1210 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 || 1211 object->cred == NULL, 1212 ("overcommit: vm_map_insert o %p", object)); 1213 cred = curthread->td_ucred; 1214 } 1215 1216charged: 1217 /* Expand the kernel pmap, if necessary. */ 1218 if (map == kernel_map && end > kernel_vm_end) 1219 pmap_growkernel(end); 1220 if (object != NULL) { 1221 /* 1222 * OBJ_ONEMAPPING must be cleared unless this mapping 1223 * is trivially proven to be the only mapping for any 1224 * of the object's pages. (Object granularity 1225 * reference counting is insufficient to recognize 1226 * aliases with precision.) 1227 */ 1228 VM_OBJECT_WLOCK(object); 1229 if (object->ref_count > 1 || object->shadow_count != 0) 1230 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1231 VM_OBJECT_WUNLOCK(object); 1232 } else if (prev_entry != &map->header && 1233 prev_entry->eflags == protoeflags && 1234 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 && 1235 prev_entry->end == start && prev_entry->wired_count == 0 && 1236 (prev_entry->cred == cred || 1237 (prev_entry->object.vm_object != NULL && 1238 prev_entry->object.vm_object->cred == cred)) && 1239 vm_object_coalesce(prev_entry->object.vm_object, 1240 prev_entry->offset, 1241 (vm_size_t)(prev_entry->end - prev_entry->start), 1242 (vm_size_t)(end - prev_entry->end), cred != NULL && 1243 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 1244 /* 1245 * We were able to extend the object. Determine if we 1246 * can extend the previous map entry to include the 1247 * new range as well. 
1248 */ 1249 if (prev_entry->inheritance == inheritance && 1250 prev_entry->protection == prot && 1251 prev_entry->max_protection == max) { 1252 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) 1253 map->size += end - prev_entry->end; 1254 prev_entry->end = end; 1255 vm_map_entry_resize_free(map, prev_entry); 1256 vm_map_simplify_entry(map, prev_entry); 1257 return (KERN_SUCCESS); 1258 } 1259 1260 /* 1261 * If we can extend the object but cannot extend the 1262 * map entry, we have to create a new map entry. We 1263 * must bump the ref count on the extended object to 1264 * account for it. object may be NULL. 1265 */ 1266 object = prev_entry->object.vm_object; 1267 offset = prev_entry->offset + 1268 (prev_entry->end - prev_entry->start); 1269 vm_object_reference(object); 1270 if (cred != NULL && object != NULL && object->cred != NULL && 1271 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 1272 /* Object already accounts for this uid. */ 1273 cred = NULL; 1274 } 1275 } 1276 if (cred != NULL) 1277 crhold(cred); 1278 1279 /* 1280 * Create a new entry 1281 */ 1282 new_entry = vm_map_entry_create(map); 1283 new_entry->start = start; 1284 new_entry->end = end; 1285 new_entry->cred = NULL; 1286 1287 new_entry->eflags = protoeflags; 1288 new_entry->object.vm_object = object; 1289 new_entry->offset = offset; 1290 1291 new_entry->inheritance = inheritance; 1292 new_entry->protection = prot; 1293 new_entry->max_protection = max; 1294 new_entry->wired_count = 0; 1295 new_entry->wiring_thread = NULL; 1296 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 1297 new_entry->next_read = OFF_TO_IDX(offset); 1298 1299 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 1300 ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); 1301 new_entry->cred = cred; 1302 1303 /* 1304 * Insert the new entry into the list 1305 */ 1306 vm_map_entry_link(map, prev_entry, new_entry); 1307 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) 1308 map->size += new_entry->end - new_entry->start; 1309 1310 /* 1311 * Try to coalesce the new entry with both the previous and next 1312 * entries in the list. Previously, we only attempted to coalesce 1313 * with the previous entry when object is NULL. Here, we handle the 1314 * other cases, which are less common. 1315 */ 1316 vm_map_simplify_entry(map, new_entry); 1317 1318 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1319 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1320 end - start, cow & MAP_PREFAULT_PARTIAL); 1321 } 1322 1323 return (KERN_SUCCESS); 1324} 1325 1326/* 1327 * vm_map_findspace: 1328 * 1329 * Find the first fit (lowest VM address) for "length" free bytes 1330 * beginning at address >= start in the given map. 1331 * 1332 * In a vm_map_entry, "adj_free" is the amount of free space 1333 * adjacent (higher address) to this entry, and "max_free" is the 1334 * maximum amount of contiguous free space in its subtree. This 1335 * allows finding a free region in one path down the tree, so 1336 * O(log n) amortized with splay trees. 1337 * 1338 * The map must be locked, and leaves it so. 1339 * 1340 * Returns: 0 on success, and starting address in *addr, 1341 * 1 if insufficient space. 1342 */ 1343int 1344vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1345 vm_offset_t *addr) /* OUT */ 1346{ 1347 vm_map_entry_t entry; 1348 vm_offset_t st; 1349 1350 /* 1351 * Request must fit within min/max VM address and must avoid 1352 * address wrap. 
1353 */ 1354 if (start < map->min_offset) 1355 start = map->min_offset; 1356 if (start + length > map->max_offset || start + length < start) 1357 return (1); 1358 1359 /* Empty tree means wide open address space. */ 1360 if (map->root == NULL) { 1361 *addr = start; 1362 return (0); 1363 } 1364 1365 /* 1366 * After splay, if start comes before root node, then there 1367 * must be a gap from start to the root. 1368 */ 1369 map->root = vm_map_entry_splay(start, map->root); 1370 if (start + length <= map->root->start) { 1371 *addr = start; 1372 return (0); 1373 } 1374 1375 /* 1376 * Root is the last node that might begin its gap before 1377 * start, and this is the last comparison where address 1378 * wrap might be a problem. 1379 */ 1380 st = (start > map->root->end) ? start : map->root->end; 1381 if (length <= map->root->end + map->root->adj_free - st) { 1382 *addr = st; 1383 return (0); 1384 } 1385 1386 /* With max_free, can immediately tell if no solution. */ 1387 entry = map->root->right; 1388 if (entry == NULL || length > entry->max_free) 1389 return (1); 1390 1391 /* 1392 * Search the right subtree in the order: left subtree, root, 1393 * right subtree (first fit). The previous splay implies that 1394 * all regions in the right subtree have addresses > start. 1395 */ 1396 while (entry != NULL) { 1397 if (entry->left != NULL && entry->left->max_free >= length) 1398 entry = entry->left; 1399 else if (entry->adj_free >= length) { 1400 *addr = entry->end; 1401 return (0); 1402 } else 1403 entry = entry->right; 1404 } 1405 1406 /* Can't get here, so panic if we do. */ 1407 panic("vm_map_findspace: max_free corrupt"); 1408} 1409 1410int 1411vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1412 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1413 vm_prot_t max, int cow) 1414{ 1415 vm_offset_t end; 1416 int result; 1417 1418 end = start + length; 1419 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1420 object == NULL, 1421 ("vm_map_fixed: non-NULL backing object for stack")); 1422 vm_map_lock(map); 1423 VM_MAP_RANGE_CHECK(map, start, end); 1424 if ((cow & MAP_CHECK_EXCL) == 0) 1425 vm_map_delete(map, start, end); 1426 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1427 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1428 prot, max, cow); 1429 } else { 1430 result = vm_map_insert(map, object, offset, start, end, 1431 prot, max, cow); 1432 } 1433 vm_map_unlock(map); 1434 return (result); 1435} 1436 1437/* 1438 * vm_map_find finds an unallocated region in the target address 1439 * map with the given length. The search is defined to be 1440 * first-fit from the specified address; the region found is 1441 * returned in the same parameter. 1442 * 1443 * If object is non-NULL, ref count must be bumped by caller 1444 * prior to making call to account for the new entry. 
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, vm_offset_t max_addr, int find_space,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, initial_addr, start;
	int result;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	initial_addr = *addr;
again:
	start = initial_addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				vm_map_unlock(map);
				if (find_space == VMFS_OPTIMAL_SPACE) {
					find_space = VMFS_ANY_SPACE;
					goto again;
				}
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_SUPER_SPACE:
			case VMFS_OPTIMAL_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
			case VMFS_ANY_SPACE:
				break;
			default:
				if ((*addr & (alignment - 1)) != 0) {
					*addr &= ~(alignment - 1);
					*addr += alignment;
				}
				break;
			}

			start = *addr;
		}
		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
			result = vm_map_stack_locked(map, start, length,
			    sgrowsiz, prot, max, cow);
		} else {
			result = vm_map_insert(map, object, offset, start,
			    start + length, prot, max, cow);
		}
	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
	    find_space != VMFS_ANY_SPACE);
	vm_map_unlock(map);
	return (result);
}

int
vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
    vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
    int cow)
{
	vm_offset_t hint;
	int rv;

	hint = *addr;
	for (;;) {
		rv = vm_map_find(map, object, offset, addr, length, max_addr,
		    find_space, prot, max, cow);
		if (rv == KERN_SUCCESS || min_addr >= hint)
			return (rv);
		*addr = hint = min_addr;
	}
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
1542 */ 1543void 1544vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1545{ 1546 vm_map_entry_t next, prev; 1547 vm_size_t prevsize, esize; 1548 1549 if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | 1550 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0) 1551 return; 1552 1553 prev = entry->prev; 1554 if (prev != &map->header) { 1555 prevsize = prev->end - prev->start; 1556 if ( (prev->end == entry->start) && 1557 (prev->object.vm_object == entry->object.vm_object) && 1558 (!prev->object.vm_object || 1559 (prev->offset + prevsize == entry->offset)) && 1560 (prev->eflags == entry->eflags) && 1561 (prev->protection == entry->protection) && 1562 (prev->max_protection == entry->max_protection) && 1563 (prev->inheritance == entry->inheritance) && 1564 (prev->wired_count == entry->wired_count) && 1565 (prev->cred == entry->cred)) { 1566 vm_map_entry_unlink(map, prev); 1567 entry->start = prev->start; 1568 entry->offset = prev->offset; 1569 if (entry->prev != &map->header) 1570 vm_map_entry_resize_free(map, entry->prev); 1571 1572 /* 1573 * If the backing object is a vnode object, 1574 * vm_object_deallocate() calls vrele(). 1575 * However, vrele() does not lock the vnode 1576 * because the vnode has additional 1577 * references. Thus, the map lock can be kept 1578 * without causing a lock-order reversal with 1579 * the vnode lock. 1580 * 1581 * Since we count the number of virtual page 1582 * mappings in object->un_pager.vnp.writemappings, 1583 * the writemappings value should not be adjusted 1584 * when the entry is disposed of. 1585 */ 1586 if (prev->object.vm_object) 1587 vm_object_deallocate(prev->object.vm_object); 1588 if (prev->cred != NULL) 1589 crfree(prev->cred); 1590 vm_map_entry_dispose(map, prev); 1591 } 1592 } 1593 1594 next = entry->next; 1595 if (next != &map->header) { 1596 esize = entry->end - entry->start; 1597 if ((entry->end == next->start) && 1598 (next->object.vm_object == entry->object.vm_object) && 1599 (!entry->object.vm_object || 1600 (entry->offset + esize == next->offset)) && 1601 (next->eflags == entry->eflags) && 1602 (next->protection == entry->protection) && 1603 (next->max_protection == entry->max_protection) && 1604 (next->inheritance == entry->inheritance) && 1605 (next->wired_count == entry->wired_count) && 1606 (next->cred == entry->cred)) { 1607 vm_map_entry_unlink(map, next); 1608 entry->end = next->end; 1609 vm_map_entry_resize_free(map, entry); 1610 1611 /* 1612 * See comment above. 1613 */ 1614 if (next->object.vm_object) 1615 vm_object_deallocate(next->object.vm_object); 1616 if (next->cred != NULL) 1617 crfree(next->cred); 1618 vm_map_entry_dispose(map, next); 1619 } 1620 } 1621} 1622/* 1623 * vm_map_clip_start: [ internal use only ] 1624 * 1625 * Asserts that the given entry begins at or after 1626 * the specified address; if necessary, 1627 * it splits the entry into two. 1628 */ 1629#define vm_map_clip_start(map, entry, startaddr) \ 1630{ \ 1631 if (startaddr > entry->start) \ 1632 _vm_map_clip_start(map, entry, startaddr); \ 1633} 1634 1635/* 1636 * This routine is called only when it is known that 1637 * the entry must be split. 1638 */ 1639static void 1640_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1641{ 1642 vm_map_entry_t new_entry; 1643 1644 VM_MAP_ASSERT_LOCKED(map); 1645 1646 /* 1647 * Split off the front portion -- note that we must insert the new 1648 * entry BEFORE this one, so that this entry has the specified 1649 * starting address. 
1650 */ 1651 vm_map_simplify_entry(map, entry); 1652 1653 /* 1654 * If there is no object backing this entry, we might as well create 1655 * one now. If we defer it, an object can get created after the map 1656 * is clipped, and individual objects will be created for the split-up 1657 * map. This is a bit of a hack, but is also about the best place to 1658 * put this improvement. 1659 */ 1660 if (entry->object.vm_object == NULL && !map->system_map && 1661 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1662 vm_object_t object; 1663 object = vm_object_allocate(OBJT_DEFAULT, 1664 atop(entry->end - entry->start)); 1665 entry->object.vm_object = object; 1666 entry->offset = 0; 1667 if (entry->cred != NULL) { 1668 object->cred = entry->cred; 1669 object->charge = entry->end - entry->start; 1670 entry->cred = NULL; 1671 } 1672 } else if (entry->object.vm_object != NULL && 1673 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1674 entry->cred != NULL) { 1675 VM_OBJECT_WLOCK(entry->object.vm_object); 1676 KASSERT(entry->object.vm_object->cred == NULL, 1677 ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); 1678 entry->object.vm_object->cred = entry->cred; 1679 entry->object.vm_object->charge = entry->end - entry->start; 1680 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1681 entry->cred = NULL; 1682 } 1683 1684 new_entry = vm_map_entry_create(map); 1685 *new_entry = *entry; 1686 1687 new_entry->end = start; 1688 entry->offset += (start - entry->start); 1689 entry->start = start; 1690 if (new_entry->cred != NULL) 1691 crhold(entry->cred); 1692 1693 vm_map_entry_link(map, entry->prev, new_entry); 1694 1695 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1696 vm_object_reference(new_entry->object.vm_object); 1697 /* 1698 * The object->un_pager.vnp.writemappings for the 1699 * object of MAP_ENTRY_VN_WRITECNT type entry shall be 1700 * kept as is here. The virtual pages are 1701 * re-distributed among the clipped entries, so the sum is 1702 * left the same. 1703 */ 1704 } 1705} 1706 1707/* 1708 * vm_map_clip_end: [ internal use only ] 1709 * 1710 * Asserts that the given entry ends at or before 1711 * the specified address; if necessary, 1712 * it splits the entry into two. 1713 */ 1714#define vm_map_clip_end(map, entry, endaddr) \ 1715{ \ 1716 if ((endaddr) < (entry->end)) \ 1717 _vm_map_clip_end((map), (entry), (endaddr)); \ 1718} 1719 1720/* 1721 * This routine is called only when it is known that 1722 * the entry must be split. 1723 */ 1724static void 1725_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1726{ 1727 vm_map_entry_t new_entry; 1728 1729 VM_MAP_ASSERT_LOCKED(map); 1730 1731 /* 1732 * If there is no object backing this entry, we might as well create 1733 * one now. If we defer it, an object can get created after the map 1734 * is clipped, and individual objects will be created for the split-up 1735 * map. This is a bit of a hack, but is also about the best place to 1736 * put this improvement. 
1737 */ 1738 if (entry->object.vm_object == NULL && !map->system_map && 1739 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1740 vm_object_t object; 1741 object = vm_object_allocate(OBJT_DEFAULT, 1742 atop(entry->end - entry->start)); 1743 entry->object.vm_object = object; 1744 entry->offset = 0; 1745 if (entry->cred != NULL) { 1746 object->cred = entry->cred; 1747 object->charge = entry->end - entry->start; 1748 entry->cred = NULL; 1749 } 1750 } else if (entry->object.vm_object != NULL && 1751 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1752 entry->cred != NULL) { 1753 VM_OBJECT_WLOCK(entry->object.vm_object); 1754 KASSERT(entry->object.vm_object->cred == NULL, 1755 ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); 1756 entry->object.vm_object->cred = entry->cred; 1757 entry->object.vm_object->charge = entry->end - entry->start; 1758 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1759 entry->cred = NULL; 1760 } 1761 1762 /* 1763 * Create a new entry and insert it AFTER the specified entry 1764 */ 1765 new_entry = vm_map_entry_create(map); 1766 *new_entry = *entry; 1767 1768 new_entry->start = entry->end = end; 1769 new_entry->offset += (end - entry->start); 1770 if (new_entry->cred != NULL) 1771 crhold(entry->cred); 1772 1773 vm_map_entry_link(map, entry, new_entry); 1774 1775 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1776 vm_object_reference(new_entry->object.vm_object); 1777 } 1778} 1779 1780/* 1781 * vm_map_submap: [ kernel use only ] 1782 * 1783 * Mark the given range as handled by a subordinate map. 1784 * 1785 * This range must have been created with vm_map_find, 1786 * and no other operations may have been performed on this 1787 * range prior to calling vm_map_submap. 1788 * 1789 * Only a limited number of operations can be performed 1790 * within this range after calling vm_map_submap: 1791 * vm_fault 1792 * [Don't try vm_map_copy!] 1793 * 1794 * To remove a submapping, one must first remove the 1795 * range from the superior map, and then destroy the 1796 * submap (if desired). [Better yet, don't try it.] 1797 */ 1798int 1799vm_map_submap( 1800 vm_map_t map, 1801 vm_offset_t start, 1802 vm_offset_t end, 1803 vm_map_t submap) 1804{ 1805 vm_map_entry_t entry; 1806 int result = KERN_INVALID_ARGUMENT; 1807 1808 vm_map_lock(map); 1809 1810 VM_MAP_RANGE_CHECK(map, start, end); 1811 1812 if (vm_map_lookup_entry(map, start, &entry)) { 1813 vm_map_clip_start(map, entry, start); 1814 } else 1815 entry = entry->next; 1816 1817 vm_map_clip_end(map, entry, end); 1818 1819 if ((entry->start == start) && (entry->end == end) && 1820 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1821 (entry->object.vm_object == NULL)) { 1822 entry->object.sub_map = submap; 1823 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1824 result = KERN_SUCCESS; 1825 } 1826 vm_map_unlock(map); 1827 1828 return (result); 1829} 1830 1831/* 1832 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 1833 */ 1834#define MAX_INIT_PT 96 1835 1836/* 1837 * vm_map_pmap_enter: 1838 * 1839 * Preload the specified map's pmap with mappings to the specified 1840 * object's memory-resident pages. No further physical pages are 1841 * allocated, and no further virtual pages are retrieved from secondary 1842 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 1843 * limited number of page mappings are created at the low-end of the 1844 * specified address range. (For this purpose, a superpage mapping 1845 * counts as one page mapping.)
Otherwise, all resident pages within 1846 * the specified address range are mapped. Because these mappings are 1847 * being created speculatively, cached pages are not reactivated and 1848 * mapped. 1849 */ 1850void 1851vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1852 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1853{ 1854 vm_offset_t start; 1855 vm_page_t p, p_start; 1856 vm_pindex_t mask, psize, threshold, tmpidx; 1857 1858 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1859 return; 1860 VM_OBJECT_RLOCK(object); 1861 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1862 VM_OBJECT_RUNLOCK(object); 1863 VM_OBJECT_WLOCK(object); 1864 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1865 pmap_object_init_pt(map->pmap, addr, object, pindex, 1866 size); 1867 VM_OBJECT_WUNLOCK(object); 1868 return; 1869 } 1870 VM_OBJECT_LOCK_DOWNGRADE(object); 1871 } 1872 1873 psize = atop(size); 1874 if (psize + pindex > object->size) { 1875 if (object->size < pindex) { 1876 VM_OBJECT_RUNLOCK(object); 1877 return; 1878 } 1879 psize = object->size - pindex; 1880 } 1881 1882 start = 0; 1883 p_start = NULL; 1884 threshold = MAX_INIT_PT; 1885 1886 p = vm_page_find_least(object, pindex); 1887 /* 1888 * Assert: the variable p is either (1) the page with the 1889 * least pindex greater than or equal to the parameter pindex 1890 * or (2) NULL. 1891 */ 1892 for (; 1893 p != NULL && (tmpidx = p->pindex - pindex) < psize; 1894 p = TAILQ_NEXT(p, listq)) { 1895 /* 1896 * don't allow a madvise prefault to blow away our really 1897 * free pages by allocating pv entries. 1898 */ 1899 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 1900 cnt.v_free_count < cnt.v_free_reserved) || 1901 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 1902 tmpidx >= threshold)) { 1903 psize = tmpidx; 1904 break; 1905 } 1906 if (p->valid == VM_PAGE_BITS_ALL) { 1907 if (p_start == NULL) { 1908 start = addr + ptoa(tmpidx); 1909 p_start = p; 1910 } 1911 /* Jump ahead if a superpage mapping is possible. */ 1912 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 1913 (pagesizes[p->psind] - 1)) == 0) { 1914 mask = atop(pagesizes[p->psind]) - 1; 1915 if (tmpidx + mask < psize && 1916 vm_page_ps_is_valid(p)) { 1917 p += mask; 1918 threshold += mask; 1919 } 1920 } 1921 } else if (p_start != NULL) { 1922 pmap_enter_object(map->pmap, start, addr + 1923 ptoa(tmpidx), p_start, prot); 1924 p_start = NULL; 1925 } 1926 } 1927 if (p_start != NULL) 1928 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 1929 p_start, prot); 1930 VM_OBJECT_RUNLOCK(object); 1931} 1932 1933/* 1934 * vm_map_protect: 1935 * 1936 * Sets the protection of the specified address 1937 * region in the target map. If "set_max" is 1938 * specified, the maximum protection is to be set; 1939 * otherwise, only the current protection is affected. 1940 */ 1941int 1942vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1943 vm_prot_t new_prot, boolean_t set_max) 1944{ 1945 vm_map_entry_t current, entry; 1946 vm_object_t obj; 1947 struct ucred *cred; 1948 vm_prot_t old_prot; 1949 1950 if (start == end) 1951 return (KERN_SUCCESS); 1952 1953 vm_map_lock(map); 1954 1955 VM_MAP_RANGE_CHECK(map, start, end); 1956 1957 if (vm_map_lookup_entry(map, start, &entry)) { 1958 vm_map_clip_start(map, entry, start); 1959 } else { 1960 entry = entry->next; 1961 } 1962 1963 /* 1964 * Make a first pass to check for protection violations.
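 * Guard entries are skipped; a submap entry fails the request with
 * KERN_INVALID_ARGUMENT, and an entry whose max_protection does not
 * include new_prot fails it with KERN_PROTECTION_FAILURE.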
1965 */ 1966 for (current = entry; current != &map->header && current->start < end; 1967 current = current->next) { 1968 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 1969 continue; 1970 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1971 vm_map_unlock(map); 1972 return (KERN_INVALID_ARGUMENT); 1973 } 1974 if ((new_prot & current->max_protection) != new_prot) { 1975 vm_map_unlock(map); 1976 return (KERN_PROTECTION_FAILURE); 1977 } 1978 } 1979 1980 /* 1981 * Do an accounting pass for private read-only mappings that 1982 * now will do cow due to allowed write (e.g. debugger sets 1983 * breakpoint on text segment) 1984 */ 1985 for (current = entry; current != &map->header && current->start < end; 1986 current = current->next) { 1987 1988 vm_map_clip_end(map, current, end); 1989 1990 if (set_max || 1991 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 1992 ENTRY_CHARGED(current) || 1993 (current->eflags & MAP_ENTRY_GUARD) != 0) { 1994 continue; 1995 } 1996 1997 cred = curthread->td_ucred; 1998 obj = current->object.vm_object; 1999 2000 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2001 if (!swap_reserve(current->end - current->start)) { 2002 vm_map_unlock(map); 2003 return (KERN_RESOURCE_SHORTAGE); 2004 } 2005 crhold(cred); 2006 current->cred = cred; 2007 continue; 2008 } 2009 2010 VM_OBJECT_WLOCK(obj); 2011 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2012 VM_OBJECT_WUNLOCK(obj); 2013 continue; 2014 } 2015 2016 /* 2017 * Charge for the whole object allocation now, since 2018 * we cannot distinguish between non-charged and 2019 * charged clipped mapping of the same object later. 2020 */ 2021 KASSERT(obj->charge == 0, 2022 ("vm_map_protect: object %p overcharged (entry %p)", 2023 obj, current)); 2024 if (!swap_reserve(ptoa(obj->size))) { 2025 VM_OBJECT_WUNLOCK(obj); 2026 vm_map_unlock(map); 2027 return (KERN_RESOURCE_SHORTAGE); 2028 } 2029 2030 crhold(cred); 2031 obj->cred = cred; 2032 obj->charge = ptoa(obj->size); 2033 VM_OBJECT_WUNLOCK(obj); 2034 } 2035 2036 /* 2037 * Go back and fix up protections. [Note that clipping is not 2038 * necessary the second time.] 2039 */ 2040 for (current = entry; current != &map->header && current->start < end; 2041 current = current->next) { 2042 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2043 continue; 2044 2045 old_prot = current->protection; 2046 2047 if (set_max) 2048 current->protection = 2049 (current->max_protection = new_prot) & 2050 old_prot; 2051 else 2052 current->protection = new_prot; 2053 2054 /* 2055 * For user wired map entries, the normal lazy evaluation of 2056 * write access upgrades through soft page faults is 2057 * undesirable. Instead, immediately copy any pages that are 2058 * copy-on-write and enable write access in the physical map. 2059 */ 2060 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2061 (current->protection & VM_PROT_WRITE) != 0 && 2062 (old_prot & VM_PROT_WRITE) == 0) 2063 vm_fault_copy_entry(map, map, current, current, NULL); 2064 2065 /* 2066 * When restricting access, update the physical map. Worry 2067 * about copy-on-write here. 2068 */ 2069 if ((old_prot & ~current->protection) != 0) { 2070#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 2071 VM_PROT_ALL) 2072 pmap_protect(map->pmap, current->start, 2073 current->end, 2074 current->protection & MASK(current)); 2075#undef MASK 2076 } 2077 vm_map_simplify_entry(map, current); 2078 } 2079 vm_map_unlock(map); 2080 return (KERN_SUCCESS); 2081} 2082 2083/* 2084 * vm_map_madvise: 2085 * 2086 * This routine traverses a process's map handling the madvise 2087 * system call. Advisories are classified as either those affecting 2088 * the vm_map_entry structure, or those affecting the underlying 2089 * objects. 2090 */ 2091int 2092vm_map_madvise( 2093 vm_map_t map, 2094 vm_offset_t start, 2095 vm_offset_t end, 2096 int behav) 2097{ 2098 vm_map_entry_t current, entry; 2099 int modify_map = 0; 2100 2101 /* 2102 * Some madvise calls directly modify the vm_map_entry, in which case 2103 * we need to use an exclusive lock on the map and we need to perform 2104 * various clipping operations. Otherwise we only need a read-lock 2105 * on the map. 2106 */ 2107 switch(behav) { 2108 case MADV_NORMAL: 2109 case MADV_SEQUENTIAL: 2110 case MADV_RANDOM: 2111 case MADV_NOSYNC: 2112 case MADV_AUTOSYNC: 2113 case MADV_NOCORE: 2114 case MADV_CORE: 2115 if (start == end) 2116 return (KERN_SUCCESS); 2117 modify_map = 1; 2118 vm_map_lock(map); 2119 break; 2120 case MADV_WILLNEED: 2121 case MADV_DONTNEED: 2122 case MADV_FREE: 2123 if (start == end) 2124 return (KERN_SUCCESS); 2125 vm_map_lock_read(map); 2126 break; 2127 default: 2128 return (KERN_INVALID_ARGUMENT); 2129 } 2130 2131 /* 2132 * Locate starting entry and clip if necessary. 2133 */ 2134 VM_MAP_RANGE_CHECK(map, start, end); 2135 2136 if (vm_map_lookup_entry(map, start, &entry)) { 2137 if (modify_map) 2138 vm_map_clip_start(map, entry, start); 2139 } else { 2140 entry = entry->next; 2141 } 2142 2143 if (modify_map) { 2144 /* 2145 * madvise behaviors that are implemented in the vm_map_entry. 2146 * 2147 * We clip the vm_map_entry so that behavioral changes are 2148 * limited to the specified address range. 2149 */ 2150 for (current = entry; 2151 (current != &map->header) && (current->start < end); 2152 current = current->next 2153 ) { 2154 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2155 continue; 2156 2157 vm_map_clip_end(map, current, end); 2158 2159 switch (behav) { 2160 case MADV_NORMAL: 2161 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2162 break; 2163 case MADV_SEQUENTIAL: 2164 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2165 break; 2166 case MADV_RANDOM: 2167 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2168 break; 2169 case MADV_NOSYNC: 2170 current->eflags |= MAP_ENTRY_NOSYNC; 2171 break; 2172 case MADV_AUTOSYNC: 2173 current->eflags &= ~MAP_ENTRY_NOSYNC; 2174 break; 2175 case MADV_NOCORE: 2176 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2177 break; 2178 case MADV_CORE: 2179 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2180 break; 2181 default: 2182 break; 2183 } 2184 vm_map_simplify_entry(map, current); 2185 } 2186 vm_map_unlock(map); 2187 } else { 2188 vm_pindex_t pstart, pend; 2189 2190 /* 2191 * madvise behaviors that are implemented in the underlying 2192 * vm_object. 2193 * 2194 * Since we don't clip the vm_map_entry, we have to clip 2195 * the vm_object pindex and count.
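 * The affected range within each entry is clamped to [start, end),
 * and the object page indices are adjusted to match.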
2196 */ 2197 for (current = entry; 2198 (current != &map->header) && (current->start < end); 2199 current = current->next 2200 ) { 2201 vm_offset_t useEnd, useStart; 2202 2203 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2204 continue; 2205 2206 pstart = OFF_TO_IDX(current->offset); 2207 pend = pstart + atop(current->end - current->start); 2208 useStart = current->start; 2209 useEnd = current->end; 2210 2211 if (current->start < start) { 2212 pstart += atop(start - current->start); 2213 useStart = start; 2214 } 2215 if (current->end > end) { 2216 pend -= atop(current->end - end); 2217 useEnd = end; 2218 } 2219 2220 if (pstart >= pend) 2221 continue; 2222 2223 /* 2224 * Perform the pmap_advise() before clearing 2225 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2226 * concurrent pmap operation, such as pmap_remove(), 2227 * could clear a reference in the pmap and set 2228 * PGA_REFERENCED on the page before the pmap_advise() 2229 * had completed. Consequently, the page would appear 2230 * referenced based upon an old reference that 2231 * occurred before this pmap_advise() ran. 2232 */ 2233 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2234 pmap_advise(map->pmap, useStart, useEnd, 2235 behav); 2236 2237 vm_object_madvise(current->object.vm_object, pstart, 2238 pend, behav); 2239 2240 /* 2241 * Pre-populate paging structures in the 2242 * WILLNEED case. For wired entries, the 2243 * paging structures are already populated. 2244 */ 2245 if (behav == MADV_WILLNEED && 2246 current->wired_count == 0) { 2247 vm_map_pmap_enter(map, 2248 useStart, 2249 current->protection, 2250 current->object.vm_object, 2251 pstart, 2252 ptoa(pend - pstart), 2253 MAP_PREFAULT_MADVISE 2254 ); 2255 } 2256 } 2257 vm_map_unlock_read(map); 2258 } 2259 return (0); 2260} 2261 2262 2263/* 2264 * vm_map_inherit: 2265 * 2266 * Sets the inheritance of the specified address 2267 * range in the target map. Inheritance 2268 * affects how the map will be shared with 2269 * child maps at the time of vmspace_fork. 2270 */ 2271int 2272vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2273 vm_inherit_t new_inheritance) 2274{ 2275 vm_map_entry_t entry; 2276 vm_map_entry_t temp_entry; 2277 2278 switch (new_inheritance) { 2279 case VM_INHERIT_NONE: 2280 case VM_INHERIT_COPY: 2281 case VM_INHERIT_SHARE: 2282 case VM_INHERIT_ZERO: 2283 break; 2284 default: 2285 return (KERN_INVALID_ARGUMENT); 2286 } 2287 if (start == end) 2288 return (KERN_SUCCESS); 2289 vm_map_lock(map); 2290 VM_MAP_RANGE_CHECK(map, start, end); 2291 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2292 entry = temp_entry; 2293 vm_map_clip_start(map, entry, start); 2294 } else 2295 entry = temp_entry->next; 2296 while ((entry != &map->header) && (entry->start < end)) { 2297 vm_map_clip_end(map, entry, end); 2298 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2299 new_inheritance != VM_INHERIT_ZERO) 2300 entry->inheritance = new_inheritance; 2301 vm_map_simplify_entry(map, entry); 2302 entry = entry->next; 2303 } 2304 vm_map_unlock(map); 2305 return (KERN_SUCCESS); 2306} 2307 2308/* 2309 * vm_map_unwire: 2310 * 2311 * Implements both kernel and user unwiring. 2312 */ 2313int 2314vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2315 int flags) 2316{ 2317 vm_map_entry_t entry, first_entry, tmp_entry; 2318 vm_offset_t saved_start; 2319 unsigned int last_timestamp; 2320 int rv; 2321 boolean_t need_wakeup, result, user_unwire; 2322 2323 if (start == end) 2324 return (KERN_SUCCESS); 2325 user_unwire = (flags & VM_MAP_WIRE_USER) ? 
TRUE : FALSE; 2326 vm_map_lock(map); 2327 VM_MAP_RANGE_CHECK(map, start, end); 2328 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2329 if (flags & VM_MAP_WIRE_HOLESOK) 2330 first_entry = first_entry->next; 2331 else { 2332 vm_map_unlock(map); 2333 return (KERN_INVALID_ADDRESS); 2334 } 2335 } 2336 last_timestamp = map->timestamp; 2337 entry = first_entry; 2338 while (entry != &map->header && entry->start < end) { 2339 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2340 /* 2341 * We have not yet clipped the entry. 2342 */ 2343 saved_start = (start >= entry->start) ? start : 2344 entry->start; 2345 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2346 if (vm_map_unlock_and_wait(map, 0)) { 2347 /* 2348 * Allow interruption of user unwiring? 2349 */ 2350 } 2351 vm_map_lock(map); 2352 if (last_timestamp+1 != map->timestamp) { 2353 /* 2354 * Look again for the entry because the map was 2355 * modified while it was unlocked. 2356 * Specifically, the entry may have been 2357 * clipped, merged, or deleted. 2358 */ 2359 if (!vm_map_lookup_entry(map, saved_start, 2360 &tmp_entry)) { 2361 if (flags & VM_MAP_WIRE_HOLESOK) 2362 tmp_entry = tmp_entry->next; 2363 else { 2364 if (saved_start == start) { 2365 /* 2366 * First_entry has been deleted. 2367 */ 2368 vm_map_unlock(map); 2369 return (KERN_INVALID_ADDRESS); 2370 } 2371 end = saved_start; 2372 rv = KERN_INVALID_ADDRESS; 2373 goto done; 2374 } 2375 } 2376 if (entry == first_entry) 2377 first_entry = tmp_entry; 2378 else 2379 first_entry = NULL; 2380 entry = tmp_entry; 2381 } 2382 last_timestamp = map->timestamp; 2383 continue; 2384 } 2385 vm_map_clip_start(map, entry, start); 2386 vm_map_clip_end(map, entry, end); 2387 /* 2388 * Mark the entry in case the map lock is released. (See 2389 * above.) 2390 */ 2391 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2392 entry->wiring_thread == NULL, 2393 ("owned map entry %p", entry)); 2394 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2395 entry->wiring_thread = curthread; 2396 /* 2397 * Check the map for holes in the specified region. 2398 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2399 */ 2400 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2401 (entry->end < end && (entry->next == &map->header || 2402 entry->next->start > entry->end))) { 2403 end = entry->end; 2404 rv = KERN_INVALID_ADDRESS; 2405 goto done; 2406 } 2407 /* 2408 * If system unwiring, require that the entry is system wired. 2409 */ 2410 if (!user_unwire && 2411 vm_map_entry_system_wired_count(entry) == 0) { 2412 end = entry->end; 2413 rv = KERN_INVALID_ARGUMENT; 2414 goto done; 2415 } 2416 entry = entry->next; 2417 } 2418 rv = KERN_SUCCESS; 2419done: 2420 need_wakeup = FALSE; 2421 if (first_entry == NULL) { 2422 result = vm_map_lookup_entry(map, start, &first_entry); 2423 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2424 first_entry = first_entry->next; 2425 else 2426 KASSERT(result, ("vm_map_unwire: lookup failed")); 2427 } 2428 for (entry = first_entry; entry != &map->header && entry->start < end; 2429 entry = entry->next) { 2430 /* 2431 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2432 * space in the unwired region could have been mapped 2433 * while the map lock was dropped for draining 2434 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2435 * could be simultaneously wiring this new mapping 2436 * entry. Detect these cases and skip any entries 2437 * marked as in transition by us. 
2438 */ 2439 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2440 entry->wiring_thread != curthread) { 2441 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2442 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2443 continue; 2444 } 2445 2446 if (rv == KERN_SUCCESS && (!user_unwire || 2447 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2448 if (user_unwire) 2449 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2450 if (entry->wired_count == 1) 2451 vm_map_entry_unwire(map, entry); 2452 else 2453 entry->wired_count--; 2454 } 2455 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2456 ("vm_map_unwire: in-transition flag missing %p", entry)); 2457 KASSERT(entry->wiring_thread == curthread, 2458 ("vm_map_unwire: alien wire %p", entry)); 2459 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2460 entry->wiring_thread = NULL; 2461 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2462 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2463 need_wakeup = TRUE; 2464 } 2465 vm_map_simplify_entry(map, entry); 2466 } 2467 vm_map_unlock(map); 2468 if (need_wakeup) 2469 vm_map_wakeup(map); 2470 return (rv); 2471} 2472 2473/* 2474 * vm_map_wire_entry_failure: 2475 * 2476 * Handle a wiring failure on the given entry. 2477 * 2478 * The map should be locked. 2479 */ 2480static void 2481vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2482 vm_offset_t failed_addr) 2483{ 2484 2485 VM_MAP_ASSERT_LOCKED(map); 2486 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2487 entry->wired_count == 1, 2488 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2489 KASSERT(failed_addr < entry->end, 2490 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2491 2492 /* 2493 * If any pages at the start of this entry were successfully wired, 2494 * then unwire them. 2495 */ 2496 if (failed_addr > entry->start) { 2497 pmap_unwire(map->pmap, entry->start, failed_addr); 2498 vm_object_unwire(entry->object.vm_object, entry->offset, 2499 failed_addr - entry->start, PQ_ACTIVE); 2500 } 2501 2502 /* 2503 * Assign an out-of-range value to represent the failure to wire this 2504 * entry. 2505 */ 2506 entry->wired_count = -1; 2507} 2508 2509/* 2510 * vm_map_wire: 2511 * 2512 * Implements both kernel and user wiring. 2513 */ 2514int 2515vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2516 int flags) 2517{ 2518 vm_map_entry_t entry, first_entry, tmp_entry; 2519 vm_offset_t faddr, saved_end, saved_start; 2520 unsigned int last_timestamp; 2521 int rv; 2522 boolean_t need_wakeup, result, user_wire; 2523 vm_prot_t prot; 2524 2525 if (start == end) 2526 return (KERN_SUCCESS); 2527 prot = 0; 2528 if (flags & VM_MAP_WIRE_WRITE) 2529 prot |= VM_PROT_WRITE; 2530 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2531 vm_map_lock(map); 2532 VM_MAP_RANGE_CHECK(map, start, end); 2533 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2534 if (flags & VM_MAP_WIRE_HOLESOK) 2535 first_entry = first_entry->next; 2536 else { 2537 vm_map_unlock(map); 2538 return (KERN_INVALID_ADDRESS); 2539 } 2540 } 2541 last_timestamp = map->timestamp; 2542 entry = first_entry; 2543 while (entry != &map->header && entry->start < end) { 2544 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2545 /* 2546 * We have not yet clipped the entry. 2547 */ 2548 saved_start = (start >= entry->start) ? start : 2549 entry->start; 2550 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2551 if (vm_map_unlock_and_wait(map, 0)) { 2552 /* 2553 * Allow interruption of user wiring? 
2554 */ 2555 } 2556 vm_map_lock(map); 2557 if (last_timestamp + 1 != map->timestamp) { 2558 /* 2559 * Look again for the entry because the map was 2560 * modified while it was unlocked. 2561 * Specifically, the entry may have been 2562 * clipped, merged, or deleted. 2563 */ 2564 if (!vm_map_lookup_entry(map, saved_start, 2565 &tmp_entry)) { 2566 if (flags & VM_MAP_WIRE_HOLESOK) 2567 tmp_entry = tmp_entry->next; 2568 else { 2569 if (saved_start == start) { 2570 /* 2571 * first_entry has been deleted. 2572 */ 2573 vm_map_unlock(map); 2574 return (KERN_INVALID_ADDRESS); 2575 } 2576 end = saved_start; 2577 rv = KERN_INVALID_ADDRESS; 2578 goto done; 2579 } 2580 } 2581 if (entry == first_entry) 2582 first_entry = tmp_entry; 2583 else 2584 first_entry = NULL; 2585 entry = tmp_entry; 2586 } 2587 last_timestamp = map->timestamp; 2588 continue; 2589 } 2590 vm_map_clip_start(map, entry, start); 2591 vm_map_clip_end(map, entry, end); 2592 /* 2593 * Mark the entry in case the map lock is released. (See 2594 * above.) 2595 */ 2596 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2597 entry->wiring_thread == NULL, 2598 ("owned map entry %p", entry)); 2599 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2600 entry->wiring_thread = curthread; 2601 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2602 || (entry->protection & prot) != prot) { 2603 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2604 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2605 end = entry->end; 2606 rv = KERN_INVALID_ADDRESS; 2607 goto done; 2608 } 2609 goto next_entry; 2610 } 2611 if (entry->wired_count == 0) { 2612 entry->wired_count++; 2613 saved_start = entry->start; 2614 saved_end = entry->end; 2615 2616 /* 2617 * Release the map lock, relying on the in-transition 2618 * mark. Mark the map busy for fork. 2619 */ 2620 vm_map_busy(map); 2621 vm_map_unlock(map); 2622 2623 faddr = saved_start; 2624 do { 2625 /* 2626 * Simulate a fault to get the page and enter 2627 * it into the physical map. 2628 */ 2629 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 2630 VM_FAULT_WIRE)) != KERN_SUCCESS) 2631 break; 2632 } while ((faddr += PAGE_SIZE) < saved_end); 2633 vm_map_lock(map); 2634 vm_map_unbusy(map); 2635 if (last_timestamp + 1 != map->timestamp) { 2636 /* 2637 * Look again for the entry because the map was 2638 * modified while it was unlocked. The entry 2639 * may have been clipped, but NOT merged or 2640 * deleted. 2641 */ 2642 result = vm_map_lookup_entry(map, saved_start, 2643 &tmp_entry); 2644 KASSERT(result, ("vm_map_wire: lookup failed")); 2645 if (entry == first_entry) 2646 first_entry = tmp_entry; 2647 else 2648 first_entry = NULL; 2649 entry = tmp_entry; 2650 while (entry->end < saved_end) { 2651 /* 2652 * In case of failure, handle entries 2653 * that were not fully wired here; 2654 * fully wired entries are handled 2655 * later. 2656 */ 2657 if (rv != KERN_SUCCESS && 2658 faddr < entry->end) 2659 vm_map_wire_entry_failure(map, 2660 entry, faddr); 2661 entry = entry->next; 2662 } 2663 } 2664 last_timestamp = map->timestamp; 2665 if (rv != KERN_SUCCESS) { 2666 vm_map_wire_entry_failure(map, entry, faddr); 2667 end = entry->end; 2668 goto done; 2669 } 2670 } else if (!user_wire || 2671 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2672 entry->wired_count++; 2673 } 2674 /* 2675 * Check the map for holes in the specified region. 2676 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
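 * A hole exists when the entry ends before the requested end and the
 * next entry does not begin exactly at this entry's end; in that case
 * KERN_INVALID_ADDRESS is returned.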
2677 */ 2678 next_entry: 2679 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 2680 entry->end < end && (entry->next == &map->header || 2681 entry->next->start > entry->end)) { 2682 end = entry->end; 2683 rv = KERN_INVALID_ADDRESS; 2684 goto done; 2685 } 2686 entry = entry->next; 2687 } 2688 rv = KERN_SUCCESS; 2689done: 2690 need_wakeup = FALSE; 2691 if (first_entry == NULL) { 2692 result = vm_map_lookup_entry(map, start, &first_entry); 2693 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2694 first_entry = first_entry->next; 2695 else 2696 KASSERT(result, ("vm_map_wire: lookup failed")); 2697 } 2698 for (entry = first_entry; entry != &map->header && entry->start < end; 2699 entry = entry->next) { 2700 /* 2701 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2702 * space in the unwired region could have been mapped 2703 * while the map lock was dropped for faulting in the 2704 * pages or draining MAP_ENTRY_IN_TRANSITION. 2705 * Moreover, another thread could be simultaneously 2706 * wiring this new mapping entry. Detect these cases 2707 * and skip any entries marked as in transition not by us. 2708 */ 2709 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2710 entry->wiring_thread != curthread) { 2711 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2712 ("vm_map_wire: !HOLESOK and new/changed entry")); 2713 continue; 2714 } 2715 2716 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2717 goto next_entry_done; 2718 2719 if (rv == KERN_SUCCESS) { 2720 if (user_wire) 2721 entry->eflags |= MAP_ENTRY_USER_WIRED; 2722 } else if (entry->wired_count == -1) { 2723 /* 2724 * Wiring failed on this entry. Thus, unwiring is 2725 * unnecessary. 2726 */ 2727 entry->wired_count = 0; 2728 } else if (!user_wire || 2729 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2730 /* 2731 * Undo the wiring. Wiring succeeded on this entry 2732 * but failed on a later entry. 2733 */ 2734 if (entry->wired_count == 1) 2735 vm_map_entry_unwire(map, entry); 2736 else 2737 entry->wired_count--; 2738 } 2739 next_entry_done: 2740 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2741 ("vm_map_wire: in-transition flag missing %p", entry)); 2742 KASSERT(entry->wiring_thread == curthread, 2743 ("vm_map_wire: alien wire %p", entry)); 2744 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2745 MAP_ENTRY_WIRE_SKIPPED); 2746 entry->wiring_thread = NULL; 2747 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2748 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2749 need_wakeup = TRUE; 2750 } 2751 vm_map_simplify_entry(map, entry); 2752 } 2753 vm_map_unlock(map); 2754 if (need_wakeup) 2755 vm_map_wakeup(map); 2756 return (rv); 2757} 2758 2759/* 2760 * vm_map_sync 2761 * 2762 * Push any dirty cached pages in the address range to their pager. 2763 * If syncio is TRUE, dirty pages are written synchronously. 2764 * If invalidate is TRUE, any cached pages are freed as well. 2765 * 2766 * If the size of the region from start to end is zero, we are 2767 * supposed to flush all modified pages within the region containing 2768 * start. Unfortunately, a region can be split or coalesced with 2769 * neighboring regions, making it difficult to determine what the 2770 * original region was. Therefore, we approximate this requirement by 2771 * flushing the current region containing start. 2772 * 2773 * Returns an error if any part of the specified range is not mapped. 
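 * User-wired entries cause KERN_INVALID_ARGUMENT when invalidation is
 * requested, and holes within the range cause KERN_INVALID_ADDRESS.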
2774 */ 2775int 2776vm_map_sync( 2777 vm_map_t map, 2778 vm_offset_t start, 2779 vm_offset_t end, 2780 boolean_t syncio, 2781 boolean_t invalidate) 2782{ 2783 vm_map_entry_t current; 2784 vm_map_entry_t entry; 2785 vm_size_t size; 2786 vm_object_t object; 2787 vm_ooffset_t offset; 2788 unsigned int last_timestamp; 2789 boolean_t failed; 2790 2791 vm_map_lock_read(map); 2792 VM_MAP_RANGE_CHECK(map, start, end); 2793 if (!vm_map_lookup_entry(map, start, &entry)) { 2794 vm_map_unlock_read(map); 2795 return (KERN_INVALID_ADDRESS); 2796 } else if (start == end) { 2797 start = entry->start; 2798 end = entry->end; 2799 } 2800 /* 2801 * Make a first pass to check for user-wired memory and holes. 2802 */ 2803 for (current = entry; current != &map->header && current->start < end; 2804 current = current->next) { 2805 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2806 vm_map_unlock_read(map); 2807 return (KERN_INVALID_ARGUMENT); 2808 } 2809 if (end > current->end && 2810 (current->next == &map->header || 2811 current->end != current->next->start)) { 2812 vm_map_unlock_read(map); 2813 return (KERN_INVALID_ADDRESS); 2814 } 2815 } 2816 2817 if (invalidate) 2818 pmap_remove(map->pmap, start, end); 2819 failed = FALSE; 2820 2821 /* 2822 * Make a second pass, cleaning/uncaching pages from the indicated 2823 * objects as we go. 2824 */ 2825 for (current = entry; current != &map->header && current->start < end;) { 2826 offset = current->offset + (start - current->start); 2827 size = (end <= current->end ? end : current->end) - start; 2828 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2829 vm_map_t smap; 2830 vm_map_entry_t tentry; 2831 vm_size_t tsize; 2832 2833 smap = current->object.sub_map; 2834 vm_map_lock_read(smap); 2835 (void) vm_map_lookup_entry(smap, offset, &tentry); 2836 tsize = tentry->end - offset; 2837 if (tsize < size) 2838 size = tsize; 2839 object = tentry->object.vm_object; 2840 offset = tentry->offset + (offset - tentry->start); 2841 vm_map_unlock_read(smap); 2842 } else { 2843 object = current->object.vm_object; 2844 } 2845 vm_object_reference(object); 2846 last_timestamp = map->timestamp; 2847 vm_map_unlock_read(map); 2848 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2849 failed = TRUE; 2850 start += size; 2851 vm_object_deallocate(object); 2852 vm_map_lock_read(map); 2853 if (last_timestamp == map->timestamp || 2854 !vm_map_lookup_entry(map, start, &current)) 2855 current = current->next; 2856 } 2857 2858 vm_map_unlock_read(map); 2859 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2860} 2861 2862/* 2863 * vm_map_entry_unwire: [ internal use only ] 2864 * 2865 * Make the region specified by this entry pageable. 2866 * 2867 * The map in question should be locked. 2868 * [This is the reason for this routine's existence.] 2869 */ 2870static void 2871vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2872{ 2873 2874 VM_MAP_ASSERT_LOCKED(map); 2875 KASSERT(entry->wired_count > 0, 2876 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 2877 pmap_unwire(map->pmap, entry->start, entry->end); 2878 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 2879 entry->start, PQ_ACTIVE); 2880 entry->wired_count = 0; 2881} 2882 2883static void 2884vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2885{ 2886 2887 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2888 vm_object_deallocate(entry->object.vm_object); 2889 uma_zfree(system_map ?
kmapentzone : mapentzone, entry); 2890} 2891 2892/* 2893 * vm_map_entry_delete: [ internal use only ] 2894 * 2895 * Deallocate the given entry from the target map. 2896 */ 2897static void 2898vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2899{ 2900 vm_object_t object; 2901 vm_pindex_t offidxstart, offidxend, count, size1; 2902 vm_ooffset_t size; 2903 2904 vm_map_entry_unlink(map, entry); 2905 object = entry->object.vm_object; 2906 2907 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 2908 MPASS(entry->cred == NULL); 2909 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 2910 MPASS(object == NULL); 2911 vm_map_entry_deallocate(entry, map->system_map); 2912 return; 2913 } 2914 2915 size = entry->end - entry->start; 2916 map->size -= size; 2917 2918 if (entry->cred != NULL) { 2919 swap_release_by_cred(size, entry->cred); 2920 crfree(entry->cred); 2921 } 2922 2923 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2924 (object != NULL)) { 2925 KASSERT(entry->cred == NULL || object->cred == NULL || 2926 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 2927 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 2928 count = OFF_TO_IDX(size); 2929 offidxstart = OFF_TO_IDX(entry->offset); 2930 offidxend = offidxstart + count; 2931 VM_OBJECT_WLOCK(object); 2932 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 2933 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2934 object == kernel_object || object == kmem_object)) { 2935 vm_object_collapse(object); 2936 2937 /* 2938 * The option OBJPR_NOTMAPPED can be passed here 2939 * because vm_map_delete() already performed 2940 * pmap_remove() on the only mapping to this range 2941 * of pages. 2942 */ 2943 vm_object_page_remove(object, offidxstart, offidxend, 2944 OBJPR_NOTMAPPED); 2945 if (object->type == OBJT_SWAP) 2946 swap_pager_freespace(object, offidxstart, 2947 count); 2948 if (offidxend >= object->size && 2949 offidxstart < object->size) { 2950 size1 = object->size; 2951 object->size = offidxstart; 2952 if (object->cred != NULL) { 2953 size1 -= object->size; 2954 KASSERT(object->charge >= ptoa(size1), 2955 ("object %p charge < 0", object)); 2956 swap_release_by_cred(ptoa(size1), 2957 object->cred); 2958 object->charge -= ptoa(size1); 2959 } 2960 } 2961 } 2962 VM_OBJECT_WUNLOCK(object); 2963 } else 2964 entry->object.vm_object = NULL; 2965 if (map->system_map) 2966 vm_map_entry_deallocate(entry, TRUE); 2967 else { 2968 entry->next = curthread->td_map_def_user; 2969 curthread->td_map_def_user = entry; 2970 } 2971} 2972 2973/* 2974 * vm_map_delete: [ internal use only ] 2975 * 2976 * Deallocates the given address range from the target 2977 * map. 2978 */ 2979int 2980vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2981{ 2982 vm_map_entry_t entry; 2983 vm_map_entry_t first_entry; 2984 2985 VM_MAP_ASSERT_LOCKED(map); 2986 if (start == end) 2987 return (KERN_SUCCESS); 2988 2989 /* 2990 * Find the start of the region, and clip it 2991 */ 2992 if (!vm_map_lookup_entry(map, start, &first_entry)) 2993 entry = first_entry->next; 2994 else { 2995 entry = first_entry; 2996 vm_map_clip_start(map, entry, start); 2997 } 2998 2999 /* 3000 * Step through all entries in this region 3001 */ 3002 while ((entry != &map->header) && (entry->start < end)) { 3003 vm_map_entry_t next; 3004 3005 /* 3006 * Wait for wiring or unwiring of an entry to complete. 3007 * Also wait for any system wirings to disappear on 3008 * user maps. 
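 * The map lock is dropped while sleeping, so the entry is looked up
 * again if the map timestamp changed in the meantime.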
3009 */ 3010 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3011 (vm_map_pmap(map) != kernel_pmap && 3012 vm_map_entry_system_wired_count(entry) != 0)) { 3013 unsigned int last_timestamp; 3014 vm_offset_t saved_start; 3015 vm_map_entry_t tmp_entry; 3016 3017 saved_start = entry->start; 3018 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3019 last_timestamp = map->timestamp; 3020 (void) vm_map_unlock_and_wait(map, 0); 3021 vm_map_lock(map); 3022 if (last_timestamp + 1 != map->timestamp) { 3023 /* 3024 * Look again for the entry because the map was 3025 * modified while it was unlocked. 3026 * Specifically, the entry may have been 3027 * clipped, merged, or deleted. 3028 */ 3029 if (!vm_map_lookup_entry(map, saved_start, 3030 &tmp_entry)) 3031 entry = tmp_entry->next; 3032 else { 3033 entry = tmp_entry; 3034 vm_map_clip_start(map, entry, 3035 saved_start); 3036 } 3037 } 3038 continue; 3039 } 3040 vm_map_clip_end(map, entry, end); 3041 3042 next = entry->next; 3043 3044 /* 3045 * Unwire before removing addresses from the pmap; otherwise, 3046 * unwiring will put the entries back in the pmap. 3047 */ 3048 if (entry->wired_count != 0) { 3049 vm_map_entry_unwire(map, entry); 3050 } 3051 3052 pmap_remove(map->pmap, entry->start, entry->end); 3053 3054 /* 3055 * Delete the entry only after removing all pmap 3056 * entries pointing to its pages. (Otherwise, its 3057 * page frames may be reallocated, and any modify bits 3058 * will be set in the wrong object!) 3059 */ 3060 vm_map_entry_delete(map, entry); 3061 entry = next; 3062 } 3063 return (KERN_SUCCESS); 3064} 3065 3066/* 3067 * vm_map_remove: 3068 * 3069 * Remove the given address range from the target map. 3070 * This is the exported form of vm_map_delete. 3071 */ 3072int 3073vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3074{ 3075 int result; 3076 3077 vm_map_lock(map); 3078 VM_MAP_RANGE_CHECK(map, start, end); 3079 result = vm_map_delete(map, start, end); 3080 vm_map_unlock(map); 3081 return (result); 3082} 3083 3084/* 3085 * vm_map_check_protection: 3086 * 3087 * Assert that the target map allows the specified privilege on the 3088 * entire address region given. The entire region must be allocated. 3089 * 3090 * WARNING! This code does not and should not check whether the 3091 * contents of the region is accessible. For example a smaller file 3092 * might be mapped into a larger address space. 3093 * 3094 * NOTE! This code is also called by munmap(). 3095 * 3096 * The map must be locked. A read lock is sufficient. 3097 */ 3098boolean_t 3099vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3100 vm_prot_t protection) 3101{ 3102 vm_map_entry_t entry; 3103 vm_map_entry_t tmp_entry; 3104 3105 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3106 return (FALSE); 3107 entry = tmp_entry; 3108 3109 while (start < end) { 3110 if (entry == &map->header) 3111 return (FALSE); 3112 /* 3113 * No holes allowed! 3114 */ 3115 if (start < entry->start) 3116 return (FALSE); 3117 /* 3118 * Check protection associated with entry. 3119 */ 3120 if ((entry->protection & protection) != protection) 3121 return (FALSE); 3122 /* go to next entry */ 3123 start = entry->end; 3124 entry = entry->next; 3125 } 3126 return (TRUE); 3127} 3128 3129/* 3130 * vm_map_copy_entry: 3131 * 3132 * Copies the contents of the source entry to the destination 3133 * entry. The entries *must* be aligned properly. 
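 * Unwired or read-only source entries are shared copy-on-write;
 * writable wired entries are copied immediately through
 * vm_fault_copy_entry().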
3134 */ 3135static void 3136vm_map_copy_entry( 3137 vm_map_t src_map, 3138 vm_map_t dst_map, 3139 vm_map_entry_t src_entry, 3140 vm_map_entry_t dst_entry, 3141 vm_ooffset_t *fork_charge) 3142{ 3143 vm_object_t src_object; 3144 vm_map_entry_t fake_entry; 3145 vm_offset_t size; 3146 struct ucred *cred; 3147 int charged; 3148 3149 VM_MAP_ASSERT_LOCKED(dst_map); 3150 3151 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3152 return; 3153 3154 if (src_entry->wired_count == 0 || 3155 (src_entry->protection & VM_PROT_WRITE) == 0) { 3156 /* 3157 * If the source entry is marked needs_copy, it is already 3158 * write-protected. 3159 */ 3160 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3161 (src_entry->protection & VM_PROT_WRITE) != 0) { 3162 pmap_protect(src_map->pmap, 3163 src_entry->start, 3164 src_entry->end, 3165 src_entry->protection & ~VM_PROT_WRITE); 3166 } 3167 3168 /* 3169 * Make a copy of the object. 3170 */ 3171 size = src_entry->end - src_entry->start; 3172 if ((src_object = src_entry->object.vm_object) != NULL) { 3173 VM_OBJECT_WLOCK(src_object); 3174 charged = ENTRY_CHARGED(src_entry); 3175 if (src_object->handle == NULL && 3176 (src_object->type == OBJT_DEFAULT || 3177 src_object->type == OBJT_SWAP)) { 3178 vm_object_collapse(src_object); 3179 if ((src_object->flags & (OBJ_NOSPLIT | 3180 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3181 vm_object_split(src_entry); 3182 src_object = 3183 src_entry->object.vm_object; 3184 } 3185 } 3186 vm_object_reference_locked(src_object); 3187 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3188 if (src_entry->cred != NULL && 3189 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3190 KASSERT(src_object->cred == NULL, 3191 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3192 src_object)); 3193 src_object->cred = src_entry->cred; 3194 src_object->charge = size; 3195 } 3196 VM_OBJECT_WUNLOCK(src_object); 3197 dst_entry->object.vm_object = src_object; 3198 if (charged) { 3199 cred = curthread->td_ucred; 3200 crhold(cred); 3201 dst_entry->cred = cred; 3202 *fork_charge += size; 3203 if (!(src_entry->eflags & 3204 MAP_ENTRY_NEEDS_COPY)) { 3205 crhold(cred); 3206 src_entry->cred = cred; 3207 *fork_charge += size; 3208 } 3209 } 3210 src_entry->eflags |= MAP_ENTRY_COW | 3211 MAP_ENTRY_NEEDS_COPY; 3212 dst_entry->eflags |= MAP_ENTRY_COW | 3213 MAP_ENTRY_NEEDS_COPY; 3214 dst_entry->offset = src_entry->offset; 3215 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3216 /* 3217 * MAP_ENTRY_VN_WRITECNT cannot 3218 * indicate write reference from 3219 * src_entry, since the entry is 3220 * marked as needs copy. Allocate a 3221 * fake entry that is used to 3222 * decrement object->un_pager.vnp.writecount 3223 * at the appropriate time. Attach 3224 * fake_entry to the deferred list. 
3225 */ 3226 fake_entry = vm_map_entry_create(dst_map); 3227 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3228 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3229 vm_object_reference(src_object); 3230 fake_entry->object.vm_object = src_object; 3231 fake_entry->start = src_entry->start; 3232 fake_entry->end = src_entry->end; 3233 fake_entry->next = curthread->td_map_def_user; 3234 curthread->td_map_def_user = fake_entry; 3235 } 3236 3237 pmap_copy(dst_map->pmap, src_map->pmap, 3238 dst_entry->start, dst_entry->end - dst_entry->start, 3239 src_entry->start); 3240 } else { 3241 dst_entry->object.vm_object = NULL; 3242 dst_entry->offset = 0; 3243 if (src_entry->cred != NULL) { 3244 dst_entry->cred = curthread->td_ucred; 3245 crhold(dst_entry->cred); 3246 *fork_charge += size; 3247 } 3248 } 3249 } else { 3250 /* 3251 * We don't want to make writeable wired pages copy-on-write. 3252 * Immediately copy these pages into the new map by simulating 3253 * page faults. The new pages are pageable. 3254 */ 3255 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3256 fork_charge); 3257 } 3258} 3259 3260/* 3261 * vmspace_map_entry_forked: 3262 * Update the newly-forked vmspace each time a map entry is inherited 3263 * or copied. The values for vm_dsize and vm_tsize are approximate 3264 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3265 */ 3266static void 3267vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3268 vm_map_entry_t entry) 3269{ 3270 vm_size_t entrysize; 3271 vm_offset_t newend; 3272 3273 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3274 return; 3275 entrysize = entry->end - entry->start; 3276 vm2->vm_map.size += entrysize; 3277 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3278 vm2->vm_ssize += btoc(entrysize); 3279 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3280 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3281 newend = MIN(entry->end, 3282 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3283 vm2->vm_dsize += btoc(newend - entry->start); 3284 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3285 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3286 newend = MIN(entry->end, 3287 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3288 vm2->vm_tsize += btoc(newend - entry->start); 3289 } 3290} 3291 3292/* 3293 * vmspace_fork: 3294 * Create a new process vmspace structure and vm_map 3295 * based on those of an existing process. The new map 3296 * is based on the old map, according to the inheritance 3297 * values on the regions in that map. 3298 * 3299 * XXX It might be worth coalescing the entries added to the new vmspace. 3300 * 3301 * The source map must not be locked. 3302 */ 3303struct vmspace * 3304vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3305{ 3306 struct vmspace *vm2; 3307 vm_map_t new_map, old_map; 3308 vm_map_entry_t new_entry, old_entry; 3309 vm_object_t object; 3310 int locked; 3311 vm_inherit_t inh; 3312 3313 old_map = &vm1->vm_map; 3314 /* Copy immutable fields of vm1 to vm2. 
*/ 3315 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL); 3316 if (vm2 == NULL) 3317 return (NULL); 3318 vm2->vm_taddr = vm1->vm_taddr; 3319 vm2->vm_daddr = vm1->vm_daddr; 3320 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3321 vm_map_lock(old_map); 3322 if (old_map->busy) 3323 vm_map_wait_busy(old_map); 3324 new_map = &vm2->vm_map; 3325 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3326 KASSERT(locked, ("vmspace_fork: lock failed")); 3327 3328 old_entry = old_map->header.next; 3329 3330 while (old_entry != &old_map->header) { 3331 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3332 panic("vm_map_fork: encountered a submap"); 3333 3334 inh = old_entry->inheritance; 3335 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3336 inh != VM_INHERIT_NONE) 3337 inh = VM_INHERIT_COPY; 3338 3339 switch (inh) { 3340 case VM_INHERIT_NONE: 3341 break; 3342 3343 case VM_INHERIT_SHARE: 3344 /* 3345 * Clone the entry, creating the shared object if necessary. 3346 */ 3347 object = old_entry->object.vm_object; 3348 if (object == NULL) { 3349 object = vm_object_allocate(OBJT_DEFAULT, 3350 atop(old_entry->end - old_entry->start)); 3351 old_entry->object.vm_object = object; 3352 old_entry->offset = 0; 3353 if (old_entry->cred != NULL) { 3354 object->cred = old_entry->cred; 3355 object->charge = old_entry->end - 3356 old_entry->start; 3357 old_entry->cred = NULL; 3358 } 3359 } 3360 3361 /* 3362 * Add the reference before calling vm_object_shadow 3363 * to insure that a shadow object is created. 3364 */ 3365 vm_object_reference(object); 3366 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3367 vm_object_shadow(&old_entry->object.vm_object, 3368 &old_entry->offset, 3369 old_entry->end - old_entry->start); 3370 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3371 /* Transfer the second reference too. */ 3372 vm_object_reference( 3373 old_entry->object.vm_object); 3374 3375 /* 3376 * As in vm_map_simplify_entry(), the 3377 * vnode lock will not be acquired in 3378 * this call to vm_object_deallocate(). 3379 */ 3380 vm_object_deallocate(object); 3381 object = old_entry->object.vm_object; 3382 } 3383 VM_OBJECT_WLOCK(object); 3384 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3385 if (old_entry->cred != NULL) { 3386 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3387 object->cred = old_entry->cred; 3388 object->charge = old_entry->end - old_entry->start; 3389 old_entry->cred = NULL; 3390 } 3391 3392 /* 3393 * Assert the correct state of the vnode 3394 * v_writecount while the object is locked, to 3395 * not relock it later for the assertion 3396 * correctness. 3397 */ 3398 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3399 object->type == OBJT_VNODE) { 3400 KASSERT(((struct vnode *)object->handle)-> 3401 v_writecount > 0, 3402 ("vmspace_fork: v_writecount %p", object)); 3403 KASSERT(object->un_pager.vnp.writemappings > 0, 3404 ("vmspace_fork: vnp.writecount %p", 3405 object)); 3406 } 3407 VM_OBJECT_WUNLOCK(object); 3408 3409 /* 3410 * Clone the entry, referencing the shared object. 3411 */ 3412 new_entry = vm_map_entry_create(new_map); 3413 *new_entry = *old_entry; 3414 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3415 MAP_ENTRY_IN_TRANSITION); 3416 new_entry->wiring_thread = NULL; 3417 new_entry->wired_count = 0; 3418 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3419 vnode_pager_update_writecount(object, 3420 new_entry->start, new_entry->end); 3421 } 3422 3423 /* 3424 * Insert the entry into the new map -- we know we're 3425 * inserting at the end of the new map. 
3426 */ 3427 vm_map_entry_link(new_map, new_map->header.prev, 3428 new_entry); 3429 vmspace_map_entry_forked(vm1, vm2, new_entry); 3430 3431 /* 3432 * Update the physical map 3433 */ 3434 pmap_copy(new_map->pmap, old_map->pmap, 3435 new_entry->start, 3436 (old_entry->end - old_entry->start), 3437 old_entry->start); 3438 break; 3439 3440 case VM_INHERIT_COPY: 3441 /* 3442 * Clone the entry and link into the map. 3443 */ 3444 new_entry = vm_map_entry_create(new_map); 3445 *new_entry = *old_entry; 3446 /* 3447 * Copied entry is COW over the old object. 3448 */ 3449 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3450 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3451 new_entry->wiring_thread = NULL; 3452 new_entry->wired_count = 0; 3453 new_entry->object.vm_object = NULL; 3454 new_entry->cred = NULL; 3455 vm_map_entry_link(new_map, new_map->header.prev, 3456 new_entry); 3457 vmspace_map_entry_forked(vm1, vm2, new_entry); 3458 vm_map_copy_entry(old_map, new_map, old_entry, 3459 new_entry, fork_charge); 3460 break; 3461 3462 case VM_INHERIT_ZERO: 3463 /* 3464 * Create a new anonymous mapping entry modelled from 3465 * the old one. 3466 */ 3467 new_entry = vm_map_entry_create(new_map); 3468 memset(new_entry, 0, sizeof(*new_entry)); 3469 3470 new_entry->start = old_entry->start; 3471 new_entry->end = old_entry->end; 3472 new_entry->eflags = old_entry->eflags & 3473 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3474 MAP_ENTRY_VN_WRITECNT); 3475 new_entry->protection = old_entry->protection; 3476 new_entry->max_protection = old_entry->max_protection; 3477 new_entry->inheritance = VM_INHERIT_ZERO; 3478 3479 vm_map_entry_link(new_map, new_map->header.prev, 3480 new_entry); 3481 vmspace_map_entry_forked(vm1, vm2, new_entry); 3482 3483 new_entry->cred = curthread->td_ucred; 3484 crhold(new_entry->cred); 3485 *fork_charge += (new_entry->end - new_entry->start); 3486 3487 break; 3488 } 3489 old_entry = old_entry->next; 3490 } 3491 /* 3492 * Use inlined vm_map_unlock() to postpone handling the deferred 3493 * map entries, which cannot be done until both old_map and 3494 * new_map locks are released. 3495 */ 3496 sx_xunlock(&old_map->lock); 3497 sx_xunlock(&new_map->lock); 3498 vm_map_process_deferred(); 3499 3500 return (vm2); 3501} 3502 3503/* 3504 * Create a process's stack for exec_new_vmspace(). This function is never 3505 * asked to wire the newly created stack. 3506 */ 3507int 3508vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3509 vm_prot_t prot, vm_prot_t max, int cow) 3510{ 3511 vm_size_t growsize, init_ssize; 3512 rlim_t vmemlim; 3513 int rv; 3514 3515 MPASS((map->flags & MAP_WIREFUTURE) == 0); 3516 growsize = sgrowsiz; 3517 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 3518 vm_map_lock(map); 3519 PROC_LOCK(curproc); 3520 vmemlim = lim_cur(curproc, RLIMIT_VMEM); 3521 PROC_UNLOCK(curproc); 3522 /* If we would blow our VMEM resource limit, no go */ 3523 if (map->size + init_ssize > vmemlim) { 3524 rv = KERN_NO_SPACE; 3525 goto out; 3526 } 3527 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 3528 max, cow); 3529out: 3530 vm_map_unlock(map); 3531 return (rv); 3532} 3533 3534static int stack_guard_page = 1; 3535SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 3536 &stack_guard_page, 0, 3537 "Specifies the number of guard pages for a stack that grows"); 3538 3539static int 3540vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3541 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 3542{ 3543 vm_map_entry_t new_entry, prev_entry; 3544 vm_offset_t bot, gap_bot, gap_top, top; 3545 vm_size_t init_ssize, sgp; 3546 int orient, rv; 3547 3548 /* 3549 * The stack orientation is piggybacked with the cow argument. 3550 * Extract it into orient and mask the cow argument so that we 3551 * don't pass it around further. 3552 */ 3553 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 3554 KASSERT(orient != 0, ("No stack grow direction")); 3555 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 3556 ("bi-dir stack")); 3557 3558 if (addrbos < vm_map_min(map) || 3559 addrbos + max_ssize > vm_map_max(map) || 3560 addrbos + max_ssize <= addrbos) 3561 return (KERN_INVALID_ADDRESS); 3562 sgp = (vm_size_t)stack_guard_page * PAGE_SIZE; 3563 if (sgp >= max_ssize) 3564 return (KERN_INVALID_ARGUMENT); 3565 3566 init_ssize = growsize; 3567 if (max_ssize < init_ssize + sgp) 3568 init_ssize = max_ssize - sgp; 3569 3570 /* If addr is already mapped, no go */ 3571 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 3572 return (KERN_NO_SPACE); 3573 3574 /* 3575 * If we can't accommodate max_ssize in the current mapping, no go. 3576 */ 3577 if ((prev_entry->next != &map->header) && 3578 (prev_entry->next->start < addrbos + max_ssize)) 3579 return (KERN_NO_SPACE); 3580 3581 /* 3582 * We initially map a stack of only init_ssize. We will grow as 3583 * needed later. Depending on the orientation of the stack (i.e. 3584 * the grow direction) we either map at the top of the range, the 3585 * bottom of the range or in the middle. 3586 * 3587 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3588 * and cow to be 0. Possibly we should eliminate these as input 3589 * parameters, and just pass these values here in the insert call.
3590 */ 3591 if (orient == MAP_STACK_GROWS_DOWN) { 3592 bot = addrbos + max_ssize - init_ssize; 3593 top = bot + init_ssize; 3594 gap_bot = addrbos; 3595 gap_top = bot; 3596 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 3597 bot = addrbos; 3598 top = bot + init_ssize; 3599 gap_bot = top; 3600 gap_top = addrbos + max_ssize; 3601 } 3602 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3603 if (rv != KERN_SUCCESS) 3604 return (rv); 3605 new_entry = prev_entry->next; 3606 KASSERT(new_entry->end == top || new_entry->start == bot, 3607 ("Bad entry start/end for new stack entry")); 3608 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3609 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3610 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3611 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3612 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3613 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3614 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 3615 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 3616 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 3617 if (rv != KERN_SUCCESS) 3618 (void)vm_map_delete(map, bot, top); 3619 return (rv); 3620} 3621 3622/* 3623 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 3624 * successfully grow the stack. 3625 */ 3626static int 3627vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 3628{ 3629 vm_map_entry_t stack_entry; 3630 struct proc *p; 3631 struct vmspace *vm; 3632 struct ucred *cred; 3633 vm_offset_t gap_end, gap_start, grow_start; 3634 size_t grow_amount, guard, max_grow; 3635 rlim_t lmemlim, stacklim, vmemlim; 3636 int rv, rv1; 3637 bool gap_deleted, grow_down, is_procstack; 3638#ifdef notyet 3639 uint64_t limit; 3640#endif 3641#ifdef RACCT 3642 int error; 3643#endif 3644 3645 p = curproc; 3646 vm = p->p_vmspace; 3647 3648 /* 3649 * Disallow stack growth when the access is performed by a 3650 * debugger or AIO daemon. The reason is that the wrong 3651 * resource limits are applied. 3652 */ 3653 if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL) 3654 return (KERN_FAILURE); 3655 3656 MPASS(!map->system_map); 3657 3658 guard = stack_guard_page * PAGE_SIZE; 3659 PROC_LOCK(p); 3660 lmemlim = lim_cur(p, RLIMIT_MEMLOCK); 3661 stacklim = lim_cur(p, RLIMIT_STACK); 3662 vmemlim = lim_cur(p, RLIMIT_VMEM); 3663 PROC_UNLOCK(p); 3664retry: 3665 /* If addr is not in a hole for a stack grow area, no need to grow. 
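The gap entry is the guard entry created next to the stack by vm_map_stack_locked(); growing the stack consumes part or all of it.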
retry:
	/* If addr is not in a hole for a stack grow area, no need to grow. */
	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
		return (KERN_FAILURE);
	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
		return (KERN_SUCCESS);
	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
		stack_entry = gap_entry->next;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
		    stack_entry->start != gap_entry->end)
			return (KERN_FAILURE);
		grow_amount = round_page(stack_entry->start - addr);
		grow_down = true;
	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
		stack_entry = gap_entry->prev;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
		    stack_entry->end != gap_entry->start)
			return (KERN_FAILURE);
		grow_amount = round_page(addr + 1 - stack_entry->end);
		grow_down = false;
	} else {
		return (KERN_FAILURE);
	}
	max_grow = gap_entry->end - gap_entry->start;
	if (guard > max_grow)
		return (KERN_NO_SPACE);
	max_grow -= guard;
	if (grow_amount > max_grow)
		return (KERN_NO_SPACE);

	/*
	 * If this is the main process stack, see if we're over the stack
	 * limit.
	 */
	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
		return (KERN_NO_SPACE);

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (is_procstack && racct_set(p, RACCT_STACK,
		    ctob(vm->vm_ssize) + grow_amount)) {
			PROC_UNLOCK(p);
			return (KERN_NO_SPACE);
		}
		PROC_UNLOCK(p);
	}
#endif

	grow_amount = roundup(grow_amount, sgrowsiz);
	if (grow_amount > max_grow)
		grow_amount = max_grow;
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
		grow_amount = trunc_page((vm_size_t)stacklim) -
		    ctob(vm->vm_ssize);
	}

#ifdef notyet
	PROC_LOCK(p);
	limit = racct_get_available(p, RACCT_STACK);
	PROC_UNLOCK(p);
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
		grow_amount = limit - ctob(vm->vm_ssize);
#endif

	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
			rv = KERN_NO_SPACE;
			goto out;
		}
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(p);
			if (racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
				PROC_UNLOCK(p);
				rv = KERN_NO_SPACE;
				goto out;
			}
			PROC_UNLOCK(p);
		}
#endif
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
			PROC_UNLOCK(p);
			rv = KERN_NO_SPACE;
			goto out;
		}
		PROC_UNLOCK(p);
	}
#endif

	if (vm_map_lock_upgrade(map)) {
		gap_entry = NULL;
		vm_map_lock_read(map);
		goto retry;
	}
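	/*
	 * Growing down: carve grow_amount off the top of the gap entry
	 * (deleting the gap outright if it would be consumed entirely)
	 * and map the freed range as new stack pages; if that insertion
	 * fails, restore the gap to its previous extent.
	 *
	 * Growing up: extend the existing stack entry, and its backing
	 * object where one exists, upward into the gap instead.
	 */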
	if (grow_down) {
		grow_start = gap_entry->end - grow_amount;
		if (gap_entry->start + grow_amount == gap_entry->end) {
			gap_start = gap_entry->start;
			gap_end = gap_entry->end;
			vm_map_entry_delete(map, gap_entry);
			gap_deleted = true;
		} else {
			MPASS(gap_entry->start < gap_entry->end - grow_amount);
			gap_entry->end -= grow_amount;
			vm_map_entry_resize_free(map, gap_entry);
			gap_deleted = false;
		}
		rv = vm_map_insert(map, NULL, 0, grow_start,
		    grow_start + grow_amount,
		    stack_entry->protection, stack_entry->max_protection,
		    MAP_STACK_GROWS_DOWN);
		if (rv != KERN_SUCCESS) {
			if (gap_deleted) {
				rv1 = vm_map_insert(map, NULL, 0, gap_start,
				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
				MPASS(rv1 == KERN_SUCCESS);
			} else {
				gap_entry->end += grow_amount;
				vm_map_entry_resize_free(map, gap_entry);
			}
		}
	} else {
		grow_start = stack_entry->end;
		cred = stack_entry->cred;
		if (cred == NULL && stack_entry->object.vm_object != NULL)
			cred = stack_entry->object.vm_object->cred;
		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
			rv = KERN_NO_SPACE;
		/* Grow the underlying object if applicable. */
		else if (stack_entry->object.vm_object == NULL ||
		    vm_object_coalesce(stack_entry->object.vm_object,
		    stack_entry->offset,
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    (vm_size_t)grow_amount, cred != NULL)) {
			if (gap_entry->start + grow_amount == gap_entry->end)
				vm_map_entry_delete(map, gap_entry);
			else
				gap_entry->start += grow_amount;
			stack_entry->end += grow_amount;
			map->size += grow_amount;
			vm_map_entry_resize_free(map, stack_entry);
			rv = KERN_SUCCESS;
		} else
			rv = KERN_FAILURE;
	}
	if (rv == KERN_SUCCESS && is_procstack)
		vm->vm_ssize += btoc(grow_amount);

	/*
	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
	 */
	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
		vm_map_unlock(map);
		vm_map_wire(map, grow_start, grow_start + grow_amount,
		    (p->p_flag & P_SYSTEM)
		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
		vm_map_lock_read(map);
	} else
		vm_map_lock_downgrade(map);

out:
#ifdef RACCT
	if (racct_enable && rv != KERN_SUCCESS) {
		PROC_LOCK(p);
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		if (!old_mlock) {
			error = racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)));
			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
		PROC_UNLOCK(p);
	}
#endif

	return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace starts out empty.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
	    ("vmspace_exec recursed"));
	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
	if (newvmspace == NULL)
		return (ENOMEM);
	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but let the
	 * other processes that are still using the vmspace finally
	 * run it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	curthread->td_pflags |= TDP_EXECVMSPC;
	return (0);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
int
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_ooffset_t fork_charge;

	if (oldvmspace->vm_refcnt == 1)
		return (0);
	fork_charge = 0;
	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
	if (newvmspace == NULL)
		return (ENOMEM);
	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
		vmspace_free(newvmspace);
		return (ENOMEM);
	}
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	vmspace_free(oldvmspace);
	return (0);
}

/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;
	vm_object_t eobject;
	vm_size_t size;
	struct ucred *cred;

RetryLookup:

	vm_map_lock_read(map);

RetryLookupLocked:
	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}

	entry = *out_entry;

	/*
	 * Handle submaps.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
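	/*
	 * When the caller passes VM_PROT_FAULT_LOOKUP, a fault taken on an
	 * inaccessible stack gap guard entry is first given a chance to
	 * grow the adjoining stack; on success the lookup is retried
	 * without dropping the read lock.
	 */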
	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
		if (prot == VM_PROT_NONE && map != kernel_map &&
		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
			goto RetryLookupLocked;
	}
	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}
	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
	    ("entry %p flags %x", entry, entry->eflags));
	if ((fault_typea & VM_PROT_COPY) != 0 &&
	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
	    (entry->eflags & MAP_ENTRY_COW) == 0) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;
	size = entry->end - entry->start;
	/*
	 * If the entry was copy-on-write, we either need to create a new
	 * shadow object now (for a write access) or demote the permitted
	 * access (for a read access).
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if ((fault_type & VM_PROT_WRITE) != 0 ||
		    (fault_typea & VM_PROT_COPY) != 0) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			if (entry->cred == NULL) {
				/*
				 * The debugger owner is charged for
				 * the memory.
				 */
				cred = curthread->td_ucred;
				crhold(cred);
				if (!swap_reserve_by_cred(size, cred)) {
					crfree(cred);
					vm_map_unlock(map);
					return (KERN_RESOURCE_SHORTAGE);
				}
				entry->cred = cred;
			}
			vm_object_shadow(&entry->object.vm_object,
			    &entry->offset, size);
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			eobject = entry->object.vm_object;
			if (eobject->cred != NULL) {
				/*
				 * The object was not shadowed.
				 */
				swap_release_by_cred(size, entry->cred);
				crfree(entry->cred);
				entry->cred = NULL;
			} else if (entry->cred != NULL) {
				VM_OBJECT_WLOCK(eobject);
				eobject->cred = entry->cred;
				eobject->charge = size;
				VM_OBJECT_WUNLOCK(eobject);
				entry->cred = NULL;
			}

			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(size));
		entry->offset = 0;
		if (entry->cred != NULL) {
			VM_OBJECT_WLOCK(entry->object.vm_object);
			entry->object.vm_object->cred = entry->cred;
			entry->object.vm_object->charge = size;
			VM_OBJECT_WUNLOCK(entry->object.vm_object);
			entry->cred = NULL;
		}
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}

/*
 *	vm_map_lookup_locked:
 *
 *	Lookup the faulting address.  A version of vm_map_lookup that returns
 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
		     vm_offset_t vaddr,
		     vm_prot_t fault_typea,
		     vm_map_entry_t *out_entry,	/* OUT */
		     vm_object_t *object,	/* OUT */
		     vm_pindex_t *pindex,	/* OUT */
		     vm_prot_t *out_prot,	/* OUT */
		     boolean_t *wired)		/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry))
		return (KERN_INVALID_ADDRESS);

	entry = *out_entry;

	/*
	 * Fail if the entry refers to a submap.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
		return (KERN_FAILURE);

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	if ((fault_type & prot) != fault_type)
		return (KERN_PROTECTION_FAILURE);

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * Fail if the entry was copy-on-write for a write fault.
		 */
		if (fault_type & VM_PROT_WRITE)
			return (KERN_FAILURE);
		/*
		 * We're attempting to read a copy-on-write page --
		 * don't allow writes.
		 */
		prot &= ~VM_PROT_WRITE;
	}

	/*
	 * Fail if an object should be created.
	 */
	if (entry->object.vm_object == NULL && !map->system_map)
		return (KERN_FAILURE);

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

static void
vm_map_print(vm_map_t map)
{
	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
		    (void *)entry, (void *)entry->start, (void *)entry->end,
		    entry->eflags);
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			db_printf(", share=%p, offset=0x%jx\n",
			    (void *)entry->object.sub_map,
			    (uintmax_t)entry->offset);
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.sub_map !=
			    entry->object.sub_map)) {
				db_indent += 2;
				vm_map_print((vm_map_t)entry->object.sub_map);
				db_indent -= 2;
			}
		} else {
			if (entry->cred != NULL)
				db_printf(", ruid %d", entry->cred->cr_ruid);
			db_printf(", object=%p, offset=0x%jx",
			    (void *)entry->object.vm_object,
			    (uintmax_t)entry->offset);
			if (entry->object.vm_object && entry->object.vm_object->cred)
				db_printf(", obj ruid %d charge %jx",
				    entry->object.vm_object->cred->cr_ruid,
				    (uintmax_t)entry->object.vm_object->charge);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");

			if ((entry->prev == &map->header) ||
			    (entry->prev->object.vm_object !=
			    entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    0, 0, (char *)0);
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
}

DB_SHOW_COMMAND(map, map)
{

	if (!have_addr) {
		db_printf("usage: show map <addr>\n");
		return;
	}
	vm_map_print((vm_map_t)addr);
}

DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = db_lookup_proc(addr);
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}

#endif /* DDB */