pmap.c revision 287126
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2003 Peter Wemm 9 * All rights reserved. 10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * the Systems Programming Group of the University of Utah Computer 15 * Science Department and William Jolitz of UUNET Technologies Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. All advertising materials mentioning features or use of this software 26 * must display the following acknowledgement: 27 * This product includes software developed by the University of 28 * California, Berkeley and its contributors. 29 * 4. Neither the name of the University nor the names of its contributors 30 * may be used to endorse or promote products derived from this software 31 * without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 43 * SUCH DAMAGE. 44 * 45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 46 */ 47/*- 48 * Copyright (c) 2003 Networks Associates Technology, Inc. 49 * All rights reserved. 50 * 51 * This software was developed for the FreeBSD Project by Jake Burkholder, 52 * Safeport Network Services, and Network Associates Laboratories, the 53 * Security Research Division of Network Associates, Inc. under 54 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 55 * CHATS research program. 56 * 57 * Redistribution and use in source and binary forms, with or without 58 * modification, are permitted provided that the following conditions 59 * are met: 60 * 1. Redistributions of source code must retain the above copyright 61 * notice, this list of conditions and the following disclaimer. 62 * 2. Redistributions in binary form must reproduce the above copyright 63 * notice, this list of conditions and the following disclaimer in the 64 * documentation and/or other materials provided with the distribution. 
65 * 66 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 76 * SUCH DAMAGE. 77 */ 78 79#define AMD64_NPT_AWARE 80 81#include <sys/cdefs.h> 82__FBSDID("$FreeBSD: stable/10/sys/amd64/amd64/pmap.c 287126 2015-08-25 14:39:40Z marcel $"); 83 84/* 85 * Manages physical address maps. 86 * 87 * Since the information managed by this module is 88 * also stored by the logical address mapping module, 89 * this module may throw away valid virtual-to-physical 90 * mappings at almost any time. However, invalidations 91 * of virtual-to-physical mappings must be done as 92 * requested. 93 * 94 * In order to cope with hardware architectures which 95 * make virtual-to-physical map invalidates expensive, 96 * this module may delay invalidate or reduced protection 97 * operations until such time as they are actually 98 * necessary. This module is given full information as 99 * to which processors are currently using which maps, 100 * and to when physical maps must be made correct. 101 */ 102 103#include "opt_pmap.h" 104#include "opt_vm.h" 105 106#include <sys/param.h> 107#include <sys/bus.h> 108#include <sys/systm.h> 109#include <sys/kernel.h> 110#include <sys/ktr.h> 111#include <sys/lock.h> 112#include <sys/malloc.h> 113#include <sys/mman.h> 114#include <sys/mutex.h> 115#include <sys/proc.h> 116#include <sys/rwlock.h> 117#include <sys/sx.h> 118#include <sys/vmmeter.h> 119#include <sys/sched.h> 120#include <sys/sysctl.h> 121#include <sys/_unrhdr.h> 122#include <sys/smp.h> 123 124#include <vm/vm.h> 125#include <vm/vm_param.h> 126#include <vm/vm_kern.h> 127#include <vm/vm_page.h> 128#include <vm/vm_map.h> 129#include <vm/vm_object.h> 130#include <vm/vm_extern.h> 131#include <vm/vm_pageout.h> 132#include <vm/vm_pager.h> 133#include <vm/vm_phys.h> 134#include <vm/vm_radix.h> 135#include <vm/vm_reserv.h> 136#include <vm/uma.h> 137 138#include <machine/intr_machdep.h> 139#include <machine/apicvar.h> 140#include <machine/cpu.h> 141#include <machine/cputypes.h> 142#include <machine/md_var.h> 143#include <machine/pcb.h> 144#include <machine/specialreg.h> 145#ifdef SMP 146#include <machine/smp.h> 147#endif 148 149static __inline boolean_t 150pmap_type_guest(pmap_t pmap) 151{ 152 153 return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI)); 154} 155 156static __inline boolean_t 157pmap_emulate_ad_bits(pmap_t pmap) 158{ 159 160 return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0); 161} 162 163static __inline pt_entry_t 164pmap_valid_bit(pmap_t pmap) 165{ 166 pt_entry_t mask; 167 168 switch (pmap->pm_type) { 169 case PT_X86: 170 case PT_RVI: 171 mask = X86_PG_V; 172 break; 173 case PT_EPT: 174 if (pmap_emulate_ad_bits(pmap)) 175 mask = EPT_PG_EMUL_V; 176 else 177 mask = EPT_PG_READ; 178 break; 179 default: 180 panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type); 181 } 182 183 return (mask); 184} 185 
186static __inline pt_entry_t 187pmap_rw_bit(pmap_t pmap) 188{ 189 pt_entry_t mask; 190 191 switch (pmap->pm_type) { 192 case PT_X86: 193 case PT_RVI: 194 mask = X86_PG_RW; 195 break; 196 case PT_EPT: 197 if (pmap_emulate_ad_bits(pmap)) 198 mask = EPT_PG_EMUL_RW; 199 else 200 mask = EPT_PG_WRITE; 201 break; 202 default: 203 panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type); 204 } 205 206 return (mask); 207} 208 209static __inline pt_entry_t 210pmap_global_bit(pmap_t pmap) 211{ 212 pt_entry_t mask; 213 214 switch (pmap->pm_type) { 215 case PT_X86: 216 mask = X86_PG_G; 217 break; 218 case PT_RVI: 219 case PT_EPT: 220 mask = 0; 221 break; 222 default: 223 panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type); 224 } 225 226 return (mask); 227} 228 229static __inline pt_entry_t 230pmap_accessed_bit(pmap_t pmap) 231{ 232 pt_entry_t mask; 233 234 switch (pmap->pm_type) { 235 case PT_X86: 236 case PT_RVI: 237 mask = X86_PG_A; 238 break; 239 case PT_EPT: 240 if (pmap_emulate_ad_bits(pmap)) 241 mask = EPT_PG_READ; 242 else 243 mask = EPT_PG_A; 244 break; 245 default: 246 panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type); 247 } 248 249 return (mask); 250} 251 252static __inline pt_entry_t 253pmap_modified_bit(pmap_t pmap) 254{ 255 pt_entry_t mask; 256 257 switch (pmap->pm_type) { 258 case PT_X86: 259 case PT_RVI: 260 mask = X86_PG_M; 261 break; 262 case PT_EPT: 263 if (pmap_emulate_ad_bits(pmap)) 264 mask = EPT_PG_WRITE; 265 else 266 mask = EPT_PG_M; 267 break; 268 default: 269 panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type); 270 } 271 272 return (mask); 273} 274 275#if !defined(DIAGNOSTIC) 276#ifdef __GNUC_GNU_INLINE__ 277#define PMAP_INLINE __attribute__((__gnu_inline__)) inline 278#else 279#define PMAP_INLINE extern inline 280#endif 281#else 282#define PMAP_INLINE 283#endif 284 285#ifdef PV_STATS 286#define PV_STAT(x) do { x ; } while (0) 287#else 288#define PV_STAT(x) do { } while (0) 289#endif 290 291#define pa_index(pa) ((pa) >> PDRSHIFT) 292#define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 293 294#define NPV_LIST_LOCKS MAXCPU 295 296#define PHYS_TO_PV_LIST_LOCK(pa) \ 297 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS]) 298 299#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ 300 struct rwlock **_lockp = (lockp); \ 301 struct rwlock *_new_lock; \ 302 \ 303 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \ 304 if (_new_lock != *_lockp) { \ 305 if (*_lockp != NULL) \ 306 rw_wunlock(*_lockp); \ 307 *_lockp = _new_lock; \ 308 rw_wlock(*_lockp); \ 309 } \ 310} while (0) 311 312#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ 313 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) 314 315#define RELEASE_PV_LIST_LOCK(lockp) do { \ 316 struct rwlock **_lockp = (lockp); \ 317 \ 318 if (*_lockp != NULL) { \ 319 rw_wunlock(*_lockp); \ 320 *_lockp = NULL; \ 321 } \ 322} while (0) 323 324#define VM_PAGE_TO_PV_LIST_LOCK(m) \ 325 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)) 326 327struct pmap kernel_pmap_store; 328 329vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 330vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 331 332int nkpt; 333SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0, 334 "Number of kernel page table pages allocated on bootup"); 335 336static int ndmpdp; 337vm_paddr_t dmaplimit; 338vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 339pt_entry_t pg_nx; 340 341static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 342 343static int pat_works = 1; 344SYSCTL_INT(_vm_pmap, OID_AUTO, 
pat_works, CTLFLAG_RD, &pat_works, 1, 345 "Is page attribute table fully functional?"); 346 347static int pg_ps_enabled = 1; 348SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0, 349 "Are large page mappings enabled?"); 350 351#define PAT_INDEX_SIZE 8 352static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ 353 354static u_int64_t KPTphys; /* phys addr of kernel level 1 */ 355static u_int64_t KPDphys; /* phys addr of kernel level 2 */ 356u_int64_t KPDPphys; /* phys addr of kernel level 3 */ 357u_int64_t KPML4phys; /* phys addr of kernel level 4 */ 358 359static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ 360static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ 361static int ndmpdpphys; /* number of DMPDPphys pages */ 362 363/* 364 * pmap_mapdev support pre initialization (i.e. console) 365 */ 366#define PMAP_PREINIT_MAPPING_COUNT 8 367static struct pmap_preinit_mapping { 368 vm_paddr_t pa; 369 vm_offset_t va; 370 vm_size_t sz; 371 int mode; 372} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; 373static int pmap_initialized; 374 375static struct rwlock_padalign pvh_global_lock; 376 377/* 378 * Data for the pv entry allocation mechanism 379 */ 380static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 381static struct mtx pv_chunks_mutex; 382static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; 383static struct md_page *pv_table; 384 385/* 386 * All those kernel PT submaps that BSD is so fond of 387 */ 388pt_entry_t *CMAP1 = 0; 389caddr_t CADDR1 = 0; 390 391static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */ 392 393static struct unrhdr pcid_unr; 394static struct mtx pcid_mtx; 395int pmap_pcid_enabled = 0; 396SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN, &pmap_pcid_enabled, 397 0, "Is TLB Context ID enabled ?"); 398int invpcid_works = 0; 399SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0, 400 "Is the invpcid instruction available ?"); 401 402static int 403pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS) 404{ 405 int i; 406 uint64_t res; 407 408 res = 0; 409 CPU_FOREACH(i) { 410 res += cpuid_to_pcpu[i]->pc_pm_save_cnt; 411 } 412 return (sysctl_handle_64(oidp, &res, 0, req)); 413} 414SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW | 415 CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU", 416 "Count of saved TLB context on switch"); 417 418/* pmap_copy_pages() over non-DMAP */ 419static struct mtx cpage_lock; 420static vm_offset_t cpage_a; 421static vm_offset_t cpage_b; 422 423/* 424 * Crashdump maps. 
425 */ 426static caddr_t crashdumpmap; 427 428static void free_pv_chunk(struct pv_chunk *pc); 429static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 430static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); 431static int popcnt_pc_map_elem(uint64_t elem); 432static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); 433static void reserve_pv_entries(pmap_t pmap, int needed, 434 struct rwlock **lockp); 435static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 436 struct rwlock **lockp); 437static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 438 struct rwlock **lockp); 439static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 440 struct rwlock **lockp); 441static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 442static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 443 vm_offset_t va); 444 445static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode); 446static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 447static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, 448 vm_offset_t va, struct rwlock **lockp); 449static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, 450 vm_offset_t va); 451static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, 452 vm_prot_t prot, struct rwlock **lockp); 453static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 454 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); 455static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 456static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); 457static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 458static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va); 459static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask); 460static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, 461 struct rwlock **lockp); 462static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 463 vm_prot_t prot); 464static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask); 465static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 466 struct spglist *free, struct rwlock **lockp); 467static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 468 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp); 469static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte); 470static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 471 struct spglist *free); 472static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 473 vm_page_t m, struct rwlock **lockp); 474static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 475 pd_entry_t newpde); 476static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde); 477 478static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, 479 struct rwlock **lockp); 480static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, 481 struct rwlock **lockp); 482static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, 483 struct rwlock **lockp); 484 485static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, 486 struct spglist *free); 487static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); 488static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 489 490/* 491 * Move the kernel virtual free pointer to the next 492 * 
2MB. This is used to help improve performance 493 * by using a large (2MB) page for much of the kernel 494 * (.text, .data, .bss) 495 */ 496static vm_offset_t 497pmap_kmem_choose(vm_offset_t addr) 498{ 499 vm_offset_t newaddr = addr; 500 501 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); 502 return (newaddr); 503} 504 505/********************/ 506/* Inline functions */ 507/********************/ 508 509/* Return a non-clipped PD index for a given VA */ 510static __inline vm_pindex_t 511pmap_pde_pindex(vm_offset_t va) 512{ 513 return (va >> PDRSHIFT); 514} 515 516 517/* Return various clipped indexes for a given VA */ 518static __inline vm_pindex_t 519pmap_pte_index(vm_offset_t va) 520{ 521 522 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); 523} 524 525static __inline vm_pindex_t 526pmap_pde_index(vm_offset_t va) 527{ 528 529 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); 530} 531 532static __inline vm_pindex_t 533pmap_pdpe_index(vm_offset_t va) 534{ 535 536 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); 537} 538 539static __inline vm_pindex_t 540pmap_pml4e_index(vm_offset_t va) 541{ 542 543 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); 544} 545 546/* Return a pointer to the PML4 slot that corresponds to a VA */ 547static __inline pml4_entry_t * 548pmap_pml4e(pmap_t pmap, vm_offset_t va) 549{ 550 551 return (&pmap->pm_pml4[pmap_pml4e_index(va)]); 552} 553 554/* Return a pointer to the PDP slot that corresponds to a VA */ 555static __inline pdp_entry_t * 556pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va) 557{ 558 pdp_entry_t *pdpe; 559 560 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME); 561 return (&pdpe[pmap_pdpe_index(va)]); 562} 563 564/* Return a pointer to the PDP slot that corresponds to a VA */ 565static __inline pdp_entry_t * 566pmap_pdpe(pmap_t pmap, vm_offset_t va) 567{ 568 pml4_entry_t *pml4e; 569 pt_entry_t PG_V; 570 571 PG_V = pmap_valid_bit(pmap); 572 pml4e = pmap_pml4e(pmap, va); 573 if ((*pml4e & PG_V) == 0) 574 return (NULL); 575 return (pmap_pml4e_to_pdpe(pml4e, va)); 576} 577 578/* Return a pointer to the PD slot that corresponds to a VA */ 579static __inline pd_entry_t * 580pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va) 581{ 582 pd_entry_t *pde; 583 584 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME); 585 return (&pde[pmap_pde_index(va)]); 586} 587 588/* Return a pointer to the PD slot that corresponds to a VA */ 589static __inline pd_entry_t * 590pmap_pde(pmap_t pmap, vm_offset_t va) 591{ 592 pdp_entry_t *pdpe; 593 pt_entry_t PG_V; 594 595 PG_V = pmap_valid_bit(pmap); 596 pdpe = pmap_pdpe(pmap, va); 597 if (pdpe == NULL || (*pdpe & PG_V) == 0) 598 return (NULL); 599 return (pmap_pdpe_to_pde(pdpe, va)); 600} 601 602/* Return a pointer to the PT slot that corresponds to a VA */ 603static __inline pt_entry_t * 604pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) 605{ 606 pt_entry_t *pte; 607 608 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); 609 return (&pte[pmap_pte_index(va)]); 610} 611 612/* Return a pointer to the PT slot that corresponds to a VA */ 613static __inline pt_entry_t * 614pmap_pte(pmap_t pmap, vm_offset_t va) 615{ 616 pd_entry_t *pde; 617 pt_entry_t PG_V; 618 619 PG_V = pmap_valid_bit(pmap); 620 pde = pmap_pde(pmap, va); 621 if (pde == NULL || (*pde & PG_V) == 0) 622 return (NULL); 623 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */ 624 return ((pt_entry_t *)pde); 625 return (pmap_pde_to_pte(pde, va)); 626} 627 628static __inline void 629pmap_resident_count_inc(pmap_t pmap, int 
count) 630{ 631 632 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 633 pmap->pm_stats.resident_count += count; 634} 635 636static __inline void 637pmap_resident_count_dec(pmap_t pmap, int count) 638{ 639 640 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 641 KASSERT(pmap->pm_stats.resident_count >= count, 642 ("pmap %p resident count underflow %ld %d", pmap, 643 pmap->pm_stats.resident_count, count)); 644 pmap->pm_stats.resident_count -= count; 645} 646 647PMAP_INLINE pt_entry_t * 648vtopte(vm_offset_t va) 649{ 650 u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); 651 652 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va)); 653 654 return (PTmap + ((va >> PAGE_SHIFT) & mask)); 655} 656 657static __inline pd_entry_t * 658vtopde(vm_offset_t va) 659{ 660 u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); 661 662 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va)); 663 664 return (PDmap + ((va >> PDRSHIFT) & mask)); 665} 666 667static u_int64_t 668allocpages(vm_paddr_t *firstaddr, int n) 669{ 670 u_int64_t ret; 671 672 ret = *firstaddr; 673 bzero((void *)ret, n * PAGE_SIZE); 674 *firstaddr += n * PAGE_SIZE; 675 return (ret); 676} 677 678CTASSERT(powerof2(NDMPML4E)); 679 680/* number of kernel PDP slots */ 681#define NKPDPE(ptpgs) howmany((ptpgs), NPDEPG) 682 683static void 684nkpt_init(vm_paddr_t addr) 685{ 686 int pt_pages; 687 688#ifdef NKPT 689 pt_pages = NKPT; 690#else 691 pt_pages = howmany(addr, 1 << PDRSHIFT); 692 pt_pages += NKPDPE(pt_pages); 693 694 /* 695 * Add some slop beyond the bare minimum required for bootstrapping 696 * the kernel. 697 * 698 * This is quite important when allocating KVA for kernel modules. 699 * The modules are required to be linked in the negative 2GB of 700 * the address space. If we run out of KVA in this region then 701 * pmap_growkernel() will need to allocate page table pages to map 702 * the entire 512GB of KVA space which is an unnecessary tax on 703 * physical memory. 704 */ 705 pt_pages += 8; /* 16MB additional slop for kernel modules */ 706#endif 707 nkpt = pt_pages; 708} 709 710static void 711create_pagetables(vm_paddr_t *firstaddr) 712{ 713 int i, j, ndm1g, nkpdpe; 714 pt_entry_t *pt_p; 715 pd_entry_t *pd_p; 716 pdp_entry_t *pdp_p; 717 pml4_entry_t *p4_p; 718 719 /* Allocate page table pages for the direct map */ 720 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT; 721 if (ndmpdp < 4) /* Minimum 4GB of dirmap */ 722 ndmpdp = 4; 723 ndmpdpphys = howmany(ndmpdp, NPDPEPG); 724 if (ndmpdpphys > NDMPML4E) { 725 /* 726 * Each NDMPML4E allows 512 GB, so limit to that, 727 * and then readjust ndmpdp and ndmpdpphys. 728 */ 729 printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512); 730 Maxmem = atop(NDMPML4E * NBPML4); 731 ndmpdpphys = NDMPML4E; 732 ndmpdp = NDMPML4E * NPDEPG; 733 } 734 DMPDPphys = allocpages(firstaddr, ndmpdpphys); 735 ndm1g = 0; 736 if ((amd_feature & AMDID_PAGE1GB) != 0) 737 ndm1g = ptoa(Maxmem) >> PDPSHIFT; 738 if (ndm1g < ndmpdp) 739 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g); 740 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; 741 742 /* Allocate pages */ 743 KPML4phys = allocpages(firstaddr, 1); 744 KPDPphys = allocpages(firstaddr, NKPML4E); 745 746 /* 747 * Allocate the initial number of kernel page table pages required to 748 * bootstrap. We defer this until after all memory-size dependent 749 * allocations are done (e.g. direct map), so that we don't have to 750 * build in too much slop in our estimate. 
751 * 752 * Note that when NKPML4E > 1, we have an empty page underneath 753 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed) 754 * pages. (pmap_enter requires a PD page to exist for each KPML4E.) 755 */ 756 nkpt_init(*firstaddr); 757 nkpdpe = NKPDPE(nkpt); 758 759 KPTphys = allocpages(firstaddr, nkpt); 760 KPDphys = allocpages(firstaddr, nkpdpe); 761 762 /* Fill in the underlying page table pages */ 763 /* Nominally read-only (but really R/W) from zero to physfree */ 764 /* XXX not fully used, underneath 2M pages */ 765 pt_p = (pt_entry_t *)KPTphys; 766 for (i = 0; ptoa(i) < *firstaddr; i++) 767 pt_p[i] = ptoa(i) | X86_PG_RW | X86_PG_V | X86_PG_G; 768 769 /* Now map the page tables at their location within PTmap */ 770 pd_p = (pd_entry_t *)KPDphys; 771 for (i = 0; i < nkpt; i++) 772 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V; 773 774 /* Map from zero to end of allocations under 2M pages */ 775 /* This replaces some of the KPTphys entries above */ 776 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) 777 pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS | 778 X86_PG_G; 779 780 /* And connect up the PD to the PDP (leaving room for L4 pages) */ 781 pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE)); 782 for (i = 0; i < nkpdpe; i++) 783 pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V | 784 PG_U; 785 786 /* 787 * Now, set up the direct map region using 2MB and/or 1GB pages. If 788 * the end of physical memory is not aligned to a 1GB page boundary, 789 * then the residual physical memory is mapped with 2MB pages. Later, 790 * if pmap_mapdev{_attr}() uses the direct map for non-write-back 791 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings 792 * that are partially used. 793 */ 794 pd_p = (pd_entry_t *)DMPDphys; 795 for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) { 796 pd_p[j] = (vm_paddr_t)i << PDRSHIFT; 797 /* Preset PG_M and PG_A because demotion expects it. */ 798 pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G | 799 X86_PG_M | X86_PG_A; 800 } 801 pdp_p = (pdp_entry_t *)DMPDPphys; 802 for (i = 0; i < ndm1g; i++) { 803 pdp_p[i] = (vm_paddr_t)i << PDPSHIFT; 804 /* Preset PG_M and PG_A because demotion expects it. */ 805 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G | 806 X86_PG_M | X86_PG_A; 807 } 808 for (j = 0; i < ndmpdp; i++, j++) { 809 pdp_p[i] = DMPDphys + ptoa(j); 810 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_U; 811 } 812 813 /* And recursively map PML4 to itself in order to get PTmap */ 814 p4_p = (pml4_entry_t *)KPML4phys; 815 p4_p[PML4PML4I] = KPML4phys; 816 p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | PG_U; 817 818 /* Connect the Direct Map slot(s) up to the PML4. */ 819 for (i = 0; i < ndmpdpphys; i++) { 820 p4_p[DMPML4I + i] = DMPDPphys + ptoa(i); 821 p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | PG_U; 822 } 823 824 /* Connect the KVA slots up to the PML4 */ 825 for (i = 0; i < NKPML4E; i++) { 826 p4_p[KPML4BASE + i] = KPDPphys + ptoa(i); 827 p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V | PG_U; 828 } 829} 830 831/* 832 * Bootstrap the system enough to run with virtual memory. 833 * 834 * On amd64 this is called after mapping has already been enabled 835 * and just syncs the pmap module with what has already been done. 
836 * [We can't call it easily with mapping off since the kernel is not 837 * mapped with PA == VA, hence we would have to relocate every address 838 * from the linked base (virtual) address "KERNBASE" to the actual 839 * (physical) address starting relative to 0] 840 */ 841void 842pmap_bootstrap(vm_paddr_t *firstaddr) 843{ 844 vm_offset_t va; 845 pt_entry_t *pte; 846 847 /* 848 * Create an initial set of page tables to run the kernel in. 849 */ 850 create_pagetables(firstaddr); 851 852 /* 853 * Add a physical memory segment (vm_phys_seg) corresponding to the 854 * preallocated kernel page table pages so that vm_page structures 855 * representing these pages will be created. The vm_page structures 856 * are required for promotion of the corresponding kernel virtual 857 * addresses to superpage mappings. 858 */ 859 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 860 861 virtual_avail = (vm_offset_t) KERNBASE + *firstaddr; 862 virtual_avail = pmap_kmem_choose(virtual_avail); 863 864 virtual_end = VM_MAX_KERNEL_ADDRESS; 865 866 867 /* XXX do %cr0 as well */ 868 load_cr4(rcr4() | CR4_PGE | CR4_PSE); 869 load_cr3(KPML4phys); 870 if (cpu_stdext_feature & CPUID_STDEXT_SMEP) 871 load_cr4(rcr4() | CR4_SMEP); 872 873 /* 874 * Initialize the kernel pmap (which is statically allocated). 875 */ 876 PMAP_LOCK_INIT(kernel_pmap); 877 kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys); 878 kernel_pmap->pm_cr3 = KPML4phys; 879 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 880 CPU_FILL(&kernel_pmap->pm_save); /* always superset of pm_active */ 881 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 882 kernel_pmap->pm_flags = pmap_flags; 883 884 /* 885 * Initialize the global pv list lock. 886 */ 887 rw_init(&pvh_global_lock, "pmap pv global"); 888 889 /* 890 * Reserve some special page table entries/VA space for temporary 891 * mapping of pages. 892 */ 893#define SYSMAP(c, p, v, n) \ 894 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 895 896 va = virtual_avail; 897 pte = vtopte(va); 898 899 /* 900 * Crashdump maps. The first page is reused as CMAP1 for the 901 * memory test. 902 */ 903 SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS) 904 CADDR1 = crashdumpmap; 905 906 virtual_avail = va; 907 908 /* Initialize the PAT MSR. */ 909 pmap_init_pat(); 910 911 /* Initialize TLB Context Id. */ 912 TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled); 913 if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) { 914 load_cr4(rcr4() | CR4_PCIDE); 915 mtx_init(&pcid_mtx, "pcid", NULL, MTX_DEF); 916 init_unrhdr(&pcid_unr, 1, (1 << 12) - 1, &pcid_mtx); 917 /* Check for INVPCID support */ 918 invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID) 919 != 0; 920 kernel_pmap->pm_pcid = 0; 921#ifndef SMP 922 pmap_pcid_enabled = 0; 923#endif 924 } else 925 pmap_pcid_enabled = 0; 926} 927 928/* 929 * Setup the PAT MSR. 930 */ 931void 932pmap_init_pat(void) 933{ 934 int pat_table[PAT_INDEX_SIZE]; 935 uint64_t pat_msr; 936 u_long cr0, cr4; 937 int i; 938 939 /* Bail if this CPU doesn't implement PAT. */ 940 if ((cpu_feature & CPUID_PAT) == 0) 941 panic("no PAT??"); 942 943 /* Set default PAT index table. */ 944 for (i = 0; i < PAT_INDEX_SIZE; i++) 945 pat_table[i] = -1; 946 pat_table[PAT_WRITE_BACK] = 0; 947 pat_table[PAT_WRITE_THROUGH] = 1; 948 pat_table[PAT_UNCACHEABLE] = 3; 949 pat_table[PAT_WRITE_COMBINING] = 3; 950 pat_table[PAT_WRITE_PROTECTED] = 3; 951 pat_table[PAT_UNCACHED] = 3; 952 953 /* Initialize default PAT entries. 
*/ 954 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 955 PAT_VALUE(1, PAT_WRITE_THROUGH) | 956 PAT_VALUE(2, PAT_UNCACHED) | 957 PAT_VALUE(3, PAT_UNCACHEABLE) | 958 PAT_VALUE(4, PAT_WRITE_BACK) | 959 PAT_VALUE(5, PAT_WRITE_THROUGH) | 960 PAT_VALUE(6, PAT_UNCACHED) | 961 PAT_VALUE(7, PAT_UNCACHEABLE); 962 963 if (pat_works) { 964 /* 965 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 966 * Program 5 and 6 as WP and WC. 967 * Leave 4 and 7 as WB and UC. 968 */ 969 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); 970 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | 971 PAT_VALUE(6, PAT_WRITE_COMBINING); 972 pat_table[PAT_UNCACHED] = 2; 973 pat_table[PAT_WRITE_PROTECTED] = 5; 974 pat_table[PAT_WRITE_COMBINING] = 6; 975 } else { 976 /* 977 * Just replace PAT Index 2 with WC instead of UC-. 978 */ 979 pat_msr &= ~PAT_MASK(2); 980 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 981 pat_table[PAT_WRITE_COMBINING] = 2; 982 } 983 984 /* Disable PGE. */ 985 cr4 = rcr4(); 986 load_cr4(cr4 & ~CR4_PGE); 987 988 /* Disable caches (CD = 1, NW = 0). */ 989 cr0 = rcr0(); 990 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 991 992 /* Flushes caches and TLBs. */ 993 wbinvd(); 994 invltlb(); 995 996 /* Update PAT and index table. */ 997 wrmsr(MSR_PAT, pat_msr); 998 for (i = 0; i < PAT_INDEX_SIZE; i++) 999 pat_index[i] = pat_table[i]; 1000 1001 /* Flush caches and TLBs again. */ 1002 wbinvd(); 1003 invltlb(); 1004 1005 /* Restore caches and PGE. */ 1006 load_cr0(cr0); 1007 load_cr4(cr4); 1008} 1009 1010/* 1011 * Initialize a vm_page's machine-dependent fields. 1012 */ 1013void 1014pmap_page_init(vm_page_t m) 1015{ 1016 1017 TAILQ_INIT(&m->md.pv_list); 1018 m->md.pat_mode = PAT_WRITE_BACK; 1019} 1020 1021/* 1022 * Initialize the pmap module. 1023 * Called by vm_init, to initialize any structures that the pmap 1024 * system needs to map virtual memory. 1025 */ 1026void 1027pmap_init(void) 1028{ 1029 struct pmap_preinit_mapping *ppim; 1030 vm_page_t mpte; 1031 vm_size_t s; 1032 int i, pv_npg; 1033 1034 /* 1035 * Initialize the vm page array entries for the kernel pmap's 1036 * page table pages. 1037 */ 1038 for (i = 0; i < nkpt; i++) { 1039 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT)); 1040 KASSERT(mpte >= vm_page_array && 1041 mpte < &vm_page_array[vm_page_array_size], 1042 ("pmap_init: page table page is out of range")); 1043 mpte->pindex = pmap_pde_pindex(KERNBASE) + i; 1044 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT); 1045 } 1046 1047 /* 1048 * If the kernel is running on a virtual machine, then it must assume 1049 * that MCA is enabled by the hypervisor. Moreover, the kernel must 1050 * be prepared for the hypervisor changing the vendor and family that 1051 * are reported by CPUID. Consequently, the workaround for AMD Family 1052 * 10h Erratum 383 is enabled if the processor's feature set does not 1053 * include at least one feature that is only supported by older Intel 1054 * or newer AMD processors. 1055 */ 1056 if (vm_guest == VM_GUEST_VM && (cpu_feature & CPUID_SS) == 0 && 1057 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | 1058 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | 1059 AMDID2_FMA4)) == 0) 1060 workaround_erratum383 = 1; 1061 1062 /* 1063 * Are large page mappings enabled? 1064 */ 1065 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 1066 if (pg_ps_enabled) { 1067 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1068 ("pmap_init: can't assign to pagesizes[1]")); 1069 pagesizes[1] = NBPDR; 1070 } 1071 1072 /* 1073 * Initialize the pv chunk list mutex. 
1074 */ 1075 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); 1076 1077 /* 1078 * Initialize the pool of pv list locks. 1079 */ 1080 for (i = 0; i < NPV_LIST_LOCKS; i++) 1081 rw_init(&pv_list_locks[i], "pmap pv list"); 1082 1083 /* 1084 * Calculate the size of the pv head table for superpages. 1085 */ 1086 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR); 1087 1088 /* 1089 * Allocate memory for the pv head table for superpages. 1090 */ 1091 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1092 s = round_page(s); 1093 pv_table = (struct md_page *)kmem_malloc(kernel_arena, s, 1094 M_WAITOK | M_ZERO); 1095 for (i = 0; i < pv_npg; i++) 1096 TAILQ_INIT(&pv_table[i].pv_list); 1097 1098 mtx_init(&cpage_lock, "cpage", NULL, MTX_DEF); 1099 cpage_a = kva_alloc(PAGE_SIZE); 1100 cpage_b = kva_alloc(PAGE_SIZE); 1101 1102 pmap_initialized = 1; 1103 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 1104 ppim = pmap_preinit_mapping + i; 1105 if (ppim->va == 0) 1106 continue; 1107 /* Make the direct map consistent */ 1108 if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) { 1109 (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa), 1110 ppim->sz, ppim->mode); 1111 } 1112 if (!bootverbose) 1113 continue; 1114 printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i, 1115 ppim->pa, ppim->va, ppim->sz, ppim->mode); 1116 } 1117} 1118 1119static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, 1120 "2MB page mapping counters"); 1121 1122static u_long pmap_pde_demotions; 1123SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD, 1124 &pmap_pde_demotions, 0, "2MB page demotions"); 1125 1126static u_long pmap_pde_mappings; 1127SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, 1128 &pmap_pde_mappings, 0, "2MB page mappings"); 1129 1130static u_long pmap_pde_p_failures; 1131SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD, 1132 &pmap_pde_p_failures, 0, "2MB page promotion failures"); 1133 1134static u_long pmap_pde_promotions; 1135SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD, 1136 &pmap_pde_promotions, 0, "2MB page promotions"); 1137 1138static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0, 1139 "1GB page mapping counters"); 1140 1141static u_long pmap_pdpe_demotions; 1142SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD, 1143 &pmap_pdpe_demotions, 0, "1GB page demotions"); 1144 1145/*************************************************** 1146 * Low level helper routines..... 1147 ***************************************************/ 1148 1149static pt_entry_t 1150pmap_swap_pat(pmap_t pmap, pt_entry_t entry) 1151{ 1152 int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT; 1153 1154 switch (pmap->pm_type) { 1155 case PT_X86: 1156 case PT_RVI: 1157 /* Verify that both PAT bits are not set at the same time */ 1158 KASSERT((entry & x86_pat_bits) != x86_pat_bits, 1159 ("Invalid PAT bits in entry %#lx", entry)); 1160 1161 /* Swap the PAT bits if one of them is set */ 1162 if ((entry & x86_pat_bits) != 0) 1163 entry ^= x86_pat_bits; 1164 break; 1165 case PT_EPT: 1166 /* 1167 * Nothing to do - the memory attributes are represented 1168 * the same way for regular pages and superpages. 1169 */ 1170 break; 1171 default: 1172 panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type); 1173 } 1174 1175 return (entry); 1176} 1177 1178/* 1179 * Determine the appropriate bits to set in a PTE or PDE for a specified 1180 * caching mode. 
1181 */ 1182static int 1183pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde) 1184{ 1185 int cache_bits, pat_flag, pat_idx; 1186 1187 if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0) 1188 panic("Unknown caching mode %d\n", mode); 1189 1190 switch (pmap->pm_type) { 1191 case PT_X86: 1192 case PT_RVI: 1193 /* The PAT bit is different for PTE's and PDE's. */ 1194 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT; 1195 1196 /* Map the caching mode to a PAT index. */ 1197 pat_idx = pat_index[mode]; 1198 1199 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 1200 cache_bits = 0; 1201 if (pat_idx & 0x4) 1202 cache_bits |= pat_flag; 1203 if (pat_idx & 0x2) 1204 cache_bits |= PG_NC_PCD; 1205 if (pat_idx & 0x1) 1206 cache_bits |= PG_NC_PWT; 1207 break; 1208 1209 case PT_EPT: 1210 cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode); 1211 break; 1212 1213 default: 1214 panic("unsupported pmap type %d", pmap->pm_type); 1215 } 1216 1217 return (cache_bits); 1218} 1219 1220static int 1221pmap_cache_mask(pmap_t pmap, boolean_t is_pde) 1222{ 1223 int mask; 1224 1225 switch (pmap->pm_type) { 1226 case PT_X86: 1227 case PT_RVI: 1228 mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE; 1229 break; 1230 case PT_EPT: 1231 mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7); 1232 break; 1233 default: 1234 panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type); 1235 } 1236 1237 return (mask); 1238} 1239 1240static __inline boolean_t 1241pmap_ps_enabled(pmap_t pmap) 1242{ 1243 1244 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); 1245} 1246 1247static void 1248pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde) 1249{ 1250 1251 switch (pmap->pm_type) { 1252 case PT_X86: 1253 break; 1254 case PT_RVI: 1255 case PT_EPT: 1256 /* 1257 * XXX 1258 * This is a little bogus since the generation number is 1259 * supposed to be bumped up when a region of the address 1260 * space is invalidated in the page tables. 1261 * 1262 * In this case the old PDE entry is valid but yet we want 1263 * to make sure that any mappings using the old entry are 1264 * invalidated in the TLB. 1265 * 1266 * The reason this works as expected is because we rendezvous 1267 * "all" host cpus and force any vcpu context to exit as a 1268 * side-effect. 1269 */ 1270 atomic_add_acq_long(&pmap->pm_eptgen, 1); 1271 break; 1272 default: 1273 panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type); 1274 } 1275 pde_store(pde, newpde); 1276} 1277 1278/* 1279 * After changing the page size for the specified virtual address in the page 1280 * table, flush the corresponding entries from the processor's TLB. Only the 1281 * calling processor's TLB is affected. 1282 * 1283 * The calling thread must be pinned to a processor. 1284 */ 1285static void 1286pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde) 1287{ 1288 pt_entry_t PG_G; 1289 1290 if (pmap_type_guest(pmap)) 1291 return; 1292 1293 KASSERT(pmap->pm_type == PT_X86, 1294 ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type)); 1295 1296 PG_G = pmap_global_bit(pmap); 1297 1298 if ((newpde & PG_PS) == 0) 1299 /* Demotion: flush a specific 2MB page mapping. */ 1300 invlpg(va); 1301 else if ((newpde & PG_G) == 0) 1302 /* 1303 * Promotion: flush every 4KB page mapping from the TLB 1304 * because there are too many to flush individually. 1305 */ 1306 invltlb(); 1307 else { 1308 /* 1309 * Promotion: flush every 4KB page mapping from the TLB, 1310 * including any global (PG_G) mappings. 
1311 */ 1312 invltlb_globpcid(); 1313 } 1314} 1315#ifdef SMP 1316 1317static void 1318pmap_invalidate_page_pcid(pmap_t pmap, vm_offset_t va) 1319{ 1320 struct invpcid_descr d; 1321 uint64_t cr3; 1322 1323 if (invpcid_works) { 1324 d.pcid = pmap->pm_pcid; 1325 d.pad = 0; 1326 d.addr = va; 1327 invpcid(&d, INVPCID_ADDR); 1328 return; 1329 } 1330 1331 cr3 = rcr3(); 1332 critical_enter(); 1333 load_cr3(pmap->pm_cr3 | CR3_PCID_SAVE); 1334 invlpg(va); 1335 load_cr3(cr3 | CR3_PCID_SAVE); 1336 critical_exit(); 1337} 1338 1339/* 1340 * For SMP, these functions have to use the IPI mechanism for coherence. 1341 * 1342 * N.B.: Before calling any of the following TLB invalidation functions, 1343 * the calling processor must ensure that all stores updating a non- 1344 * kernel page table are globally performed. Otherwise, another 1345 * processor could cache an old, pre-update entry without being 1346 * invalidated. This can happen one of two ways: (1) The pmap becomes 1347 * active on another processor after its pm_active field is checked by 1348 * one of the following functions but before a store updating the page 1349 * table is globally performed. (2) The pmap becomes active on another 1350 * processor before its pm_active field is checked but due to 1351 * speculative loads one of the following functions stills reads the 1352 * pmap as inactive on the other processor. 1353 * 1354 * The kernel page table is exempt because its pm_active field is 1355 * immutable. The kernel page table is always active on every 1356 * processor. 1357 */ 1358 1359/* 1360 * Interrupt the cpus that are executing in the guest context. 1361 * This will force the vcpu to exit and the cached EPT mappings 1362 * will be invalidated by the host before the next vmresume. 1363 */ 1364static __inline void 1365pmap_invalidate_ept(pmap_t pmap) 1366{ 1367 int ipinum; 1368 1369 sched_pin(); 1370 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), 1371 ("pmap_invalidate_ept: absurd pm_active")); 1372 1373 /* 1374 * The TLB mappings associated with a vcpu context are not 1375 * flushed each time a different vcpu is chosen to execute. 1376 * 1377 * This is in contrast with a process's vtop mappings that 1378 * are flushed from the TLB on each context switch. 1379 * 1380 * Therefore we need to do more than just a TLB shootdown on 1381 * the active cpus in 'pmap->pm_active'. To do this we keep 1382 * track of the number of invalidations performed on this pmap. 1383 * 1384 * Each vcpu keeps a cache of this counter and compares it 1385 * just before a vmresume. If the counter is out-of-date an 1386 * invept will be done to flush stale mappings from the TLB. 1387 */ 1388 atomic_add_acq_long(&pmap->pm_eptgen, 1); 1389 1390 /* 1391 * Force the vcpu to exit and trap back into the hypervisor. 
1392 */ 1393 ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK; 1394 ipi_selected(pmap->pm_active, ipinum); 1395 sched_unpin(); 1396} 1397 1398void 1399pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 1400{ 1401 cpuset_t other_cpus; 1402 u_int cpuid; 1403 1404 if (pmap_type_guest(pmap)) { 1405 pmap_invalidate_ept(pmap); 1406 return; 1407 } 1408 1409 KASSERT(pmap->pm_type == PT_X86, 1410 ("pmap_invalidate_page: invalid type %d", pmap->pm_type)); 1411 1412 sched_pin(); 1413 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) { 1414 if (!pmap_pcid_enabled) { 1415 invlpg(va); 1416 } else { 1417 if (pmap->pm_pcid != -1 && pmap->pm_pcid != 0) { 1418 if (pmap == PCPU_GET(curpmap)) 1419 invlpg(va); 1420 else 1421 pmap_invalidate_page_pcid(pmap, va); 1422 } else { 1423 invltlb_globpcid(); 1424 } 1425 } 1426 smp_invlpg(pmap, va); 1427 } else { 1428 cpuid = PCPU_GET(cpuid); 1429 other_cpus = all_cpus; 1430 CPU_CLR(cpuid, &other_cpus); 1431 if (CPU_ISSET(cpuid, &pmap->pm_active)) 1432 invlpg(va); 1433 else if (pmap_pcid_enabled) { 1434 if (pmap->pm_pcid != -1 && pmap->pm_pcid != 0) 1435 pmap_invalidate_page_pcid(pmap, va); 1436 else 1437 invltlb_globpcid(); 1438 } 1439 if (pmap_pcid_enabled) 1440 CPU_AND(&other_cpus, &pmap->pm_save); 1441 else 1442 CPU_AND(&other_cpus, &pmap->pm_active); 1443 if (!CPU_EMPTY(&other_cpus)) 1444 smp_masked_invlpg(other_cpus, pmap, va); 1445 } 1446 sched_unpin(); 1447} 1448 1449static void 1450pmap_invalidate_range_pcid(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1451{ 1452 struct invpcid_descr d; 1453 uint64_t cr3; 1454 vm_offset_t addr; 1455 1456 if (invpcid_works) { 1457 d.pcid = pmap->pm_pcid; 1458 d.pad = 0; 1459 for (addr = sva; addr < eva; addr += PAGE_SIZE) { 1460 d.addr = addr; 1461 invpcid(&d, INVPCID_ADDR); 1462 } 1463 return; 1464 } 1465 1466 cr3 = rcr3(); 1467 critical_enter(); 1468 load_cr3(pmap->pm_cr3 | CR3_PCID_SAVE); 1469 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1470 invlpg(addr); 1471 load_cr3(cr3 | CR3_PCID_SAVE); 1472 critical_exit(); 1473} 1474 1475void 1476pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1477{ 1478 cpuset_t other_cpus; 1479 vm_offset_t addr; 1480 u_int cpuid; 1481 1482 if (pmap_type_guest(pmap)) { 1483 pmap_invalidate_ept(pmap); 1484 return; 1485 } 1486 1487 KASSERT(pmap->pm_type == PT_X86, 1488 ("pmap_invalidate_range: invalid type %d", pmap->pm_type)); 1489 1490 sched_pin(); 1491 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) { 1492 if (!pmap_pcid_enabled) { 1493 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1494 invlpg(addr); 1495 } else { 1496 if (pmap->pm_pcid != -1 && pmap->pm_pcid != 0) { 1497 if (pmap == PCPU_GET(curpmap)) { 1498 for (addr = sva; addr < eva; 1499 addr += PAGE_SIZE) 1500 invlpg(addr); 1501 } else { 1502 pmap_invalidate_range_pcid(pmap, 1503 sva, eva); 1504 } 1505 } else { 1506 invltlb_globpcid(); 1507 } 1508 } 1509 smp_invlpg_range(pmap, sva, eva); 1510 } else { 1511 cpuid = PCPU_GET(cpuid); 1512 other_cpus = all_cpus; 1513 CPU_CLR(cpuid, &other_cpus); 1514 if (CPU_ISSET(cpuid, &pmap->pm_active)) { 1515 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1516 invlpg(addr); 1517 } else if (pmap_pcid_enabled) { 1518 if (pmap->pm_pcid != -1 && pmap->pm_pcid != 0) 1519 pmap_invalidate_range_pcid(pmap, sva, eva); 1520 else 1521 invltlb_globpcid(); 1522 } 1523 if (pmap_pcid_enabled) 1524 CPU_AND(&other_cpus, &pmap->pm_save); 1525 else 1526 CPU_AND(&other_cpus, &pmap->pm_active); 1527 if (!CPU_EMPTY(&other_cpus)) 1528 smp_masked_invlpg_range(other_cpus, pmap, 
sva, eva); 1529 } 1530 sched_unpin(); 1531} 1532 1533void 1534pmap_invalidate_all(pmap_t pmap) 1535{ 1536 cpuset_t other_cpus; 1537 struct invpcid_descr d; 1538 uint64_t cr3; 1539 u_int cpuid; 1540 1541 if (pmap_type_guest(pmap)) { 1542 pmap_invalidate_ept(pmap); 1543 return; 1544 } 1545 1546 KASSERT(pmap->pm_type == PT_X86, 1547 ("pmap_invalidate_all: invalid type %d", pmap->pm_type)); 1548 1549 sched_pin(); 1550 cpuid = PCPU_GET(cpuid); 1551 if (pmap == kernel_pmap || 1552 (pmap_pcid_enabled && !CPU_CMP(&pmap->pm_save, &all_cpus)) || 1553 !CPU_CMP(&pmap->pm_active, &all_cpus)) { 1554 if (invpcid_works) { 1555 bzero(&d, sizeof(d)); 1556 invpcid(&d, INVPCID_CTXGLOB); 1557 } else { 1558 invltlb_globpcid(); 1559 } 1560 if (!CPU_ISSET(cpuid, &pmap->pm_active)) 1561 CPU_CLR_ATOMIC(cpuid, &pmap->pm_save); 1562 smp_invltlb(pmap); 1563 } else { 1564 other_cpus = all_cpus; 1565 CPU_CLR(cpuid, &other_cpus); 1566 1567 /* 1568 * This logic is duplicated in the Xinvltlb shootdown 1569 * IPI handler. 1570 */ 1571 if (pmap_pcid_enabled) { 1572 if (pmap->pm_pcid != -1 && pmap->pm_pcid != 0) { 1573 if (invpcid_works) { 1574 d.pcid = pmap->pm_pcid; 1575 d.pad = 0; 1576 d.addr = 0; 1577 invpcid(&d, INVPCID_CTX); 1578 } else { 1579 cr3 = rcr3(); 1580 critical_enter(); 1581 1582 /* 1583 * Bit 63 is clear, pcid TLB 1584 * entries are invalidated. 1585 */ 1586 load_cr3(pmap->pm_cr3); 1587 load_cr3(cr3 | CR3_PCID_SAVE); 1588 critical_exit(); 1589 } 1590 } else { 1591 invltlb_globpcid(); 1592 } 1593 } else if (CPU_ISSET(cpuid, &pmap->pm_active)) 1594 invltlb(); 1595 if (!CPU_ISSET(cpuid, &pmap->pm_active)) 1596 CPU_CLR_ATOMIC(cpuid, &pmap->pm_save); 1597 if (pmap_pcid_enabled) 1598 CPU_AND(&other_cpus, &pmap->pm_save); 1599 else 1600 CPU_AND(&other_cpus, &pmap->pm_active); 1601 if (!CPU_EMPTY(&other_cpus)) 1602 smp_masked_invltlb(other_cpus, pmap); 1603 } 1604 sched_unpin(); 1605} 1606 1607void 1608pmap_invalidate_cache(void) 1609{ 1610 1611 sched_pin(); 1612 wbinvd(); 1613 smp_cache_flush(); 1614 sched_unpin(); 1615} 1616 1617struct pde_action { 1618 cpuset_t invalidate; /* processors that invalidate their TLB */ 1619 pmap_t pmap; 1620 vm_offset_t va; 1621 pd_entry_t *pde; 1622 pd_entry_t newpde; 1623 u_int store; /* processor that updates the PDE */ 1624}; 1625 1626static void 1627pmap_update_pde_action(void *arg) 1628{ 1629 struct pde_action *act = arg; 1630 1631 if (act->store == PCPU_GET(cpuid)) 1632 pmap_update_pde_store(act->pmap, act->pde, act->newpde); 1633} 1634 1635static void 1636pmap_update_pde_teardown(void *arg) 1637{ 1638 struct pde_action *act = arg; 1639 1640 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) 1641 pmap_update_pde_invalidate(act->pmap, act->va, act->newpde); 1642} 1643 1644/* 1645 * Change the page size for the specified virtual address in a way that 1646 * prevents any possibility of the TLB ever having two entries that map the 1647 * same virtual address using different page sizes. This is the recommended 1648 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a 1649 * machine check exception for a TLB state that is improperly diagnosed as a 1650 * hardware error. 
1651 */ 1652static void 1653pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1654{ 1655 struct pde_action act; 1656 cpuset_t active, other_cpus; 1657 u_int cpuid; 1658 1659 sched_pin(); 1660 cpuid = PCPU_GET(cpuid); 1661 other_cpus = all_cpus; 1662 CPU_CLR(cpuid, &other_cpus); 1663 if (pmap == kernel_pmap || pmap_type_guest(pmap)) 1664 active = all_cpus; 1665 else { 1666 active = pmap->pm_active; 1667 CPU_AND_ATOMIC(&pmap->pm_save, &active); 1668 } 1669 if (CPU_OVERLAP(&active, &other_cpus)) { 1670 act.store = cpuid; 1671 act.invalidate = active; 1672 act.va = va; 1673 act.pmap = pmap; 1674 act.pde = pde; 1675 act.newpde = newpde; 1676 CPU_SET(cpuid, &active); 1677 smp_rendezvous_cpus(active, 1678 smp_no_rendevous_barrier, pmap_update_pde_action, 1679 pmap_update_pde_teardown, &act); 1680 } else { 1681 pmap_update_pde_store(pmap, pde, newpde); 1682 if (CPU_ISSET(cpuid, &active)) 1683 pmap_update_pde_invalidate(pmap, va, newpde); 1684 } 1685 sched_unpin(); 1686} 1687#else /* !SMP */ 1688/* 1689 * Normal, non-SMP, invalidation functions. 1690 * We inline these within pmap.c for speed. 1691 */ 1692PMAP_INLINE void 1693pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 1694{ 1695 1696 switch (pmap->pm_type) { 1697 case PT_X86: 1698 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1699 invlpg(va); 1700 break; 1701 case PT_RVI: 1702 case PT_EPT: 1703 pmap->pm_eptgen++; 1704 break; 1705 default: 1706 panic("pmap_invalidate_page: unknown type: %d", pmap->pm_type); 1707 } 1708} 1709 1710PMAP_INLINE void 1711pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1712{ 1713 vm_offset_t addr; 1714 1715 switch (pmap->pm_type) { 1716 case PT_X86: 1717 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1718 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1719 invlpg(addr); 1720 break; 1721 case PT_RVI: 1722 case PT_EPT: 1723 pmap->pm_eptgen++; 1724 break; 1725 default: 1726 panic("pmap_invalidate_range: unknown type: %d", pmap->pm_type); 1727 } 1728} 1729 1730PMAP_INLINE void 1731pmap_invalidate_all(pmap_t pmap) 1732{ 1733 1734 switch (pmap->pm_type) { 1735 case PT_X86: 1736 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1737 invltlb(); 1738 break; 1739 case PT_RVI: 1740 case PT_EPT: 1741 pmap->pm_eptgen++; 1742 break; 1743 default: 1744 panic("pmap_invalidate_all: unknown type %d", pmap->pm_type); 1745 } 1746} 1747 1748PMAP_INLINE void 1749pmap_invalidate_cache(void) 1750{ 1751 1752 wbinvd(); 1753} 1754 1755static void 1756pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1757{ 1758 1759 pmap_update_pde_store(pmap, pde, newpde); 1760 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1761 pmap_update_pde_invalidate(pmap, va, newpde); 1762 else 1763 CPU_ZERO(&pmap->pm_save); 1764} 1765#endif /* !SMP */ 1766 1767#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) 1768 1769void 1770pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force) 1771{ 1772 1773 if (force) { 1774 sva &= ~(vm_offset_t)cpu_clflush_line_size; 1775 } else { 1776 KASSERT((sva & PAGE_MASK) == 0, 1777 ("pmap_invalidate_cache_range: sva not page-aligned")); 1778 KASSERT((eva & PAGE_MASK) == 0, 1779 ("pmap_invalidate_cache_range: eva not page-aligned")); 1780 } 1781 1782 if ((cpu_feature & CPUID_SS) != 0 && !force) 1783 ; /* If "Self Snoop" is supported and allowed, do nothing. 
*/ 1784 else if ((cpu_feature & CPUID_CLFSH) != 0 && 1785 eva - sva < PMAP_CLFLUSH_THRESHOLD) { 1786 1787 /* 1788 * XXX: Some CPUs fault, hang, or trash the local APIC 1789 * registers if we use CLFLUSH on the local APIC 1790 * range. The local APIC is always uncached, so we 1791 * don't need to flush for that range anyway. 1792 */ 1793 if (pmap_kextract(sva) == lapic_paddr) 1794 return; 1795 1796 /* 1797 * Otherwise, do per-cache line flush. Use the mfence 1798 * instruction to insure that previous stores are 1799 * included in the write-back. The processor 1800 * propagates flush to other processors in the cache 1801 * coherence domain. 1802 */ 1803 mfence(); 1804 for (; sva < eva; sva += cpu_clflush_line_size) 1805 clflush(sva); 1806 mfence(); 1807 } else { 1808 1809 /* 1810 * No targeted cache flush methods are supported by CPU, 1811 * or the supplied range is bigger than 2MB. 1812 * Globally invalidate cache. 1813 */ 1814 pmap_invalidate_cache(); 1815 } 1816} 1817 1818/* 1819 * Remove the specified set of pages from the data and instruction caches. 1820 * 1821 * In contrast to pmap_invalidate_cache_range(), this function does not 1822 * rely on the CPU's self-snoop feature, because it is intended for use 1823 * when moving pages into a different cache domain. 1824 */ 1825void 1826pmap_invalidate_cache_pages(vm_page_t *pages, int count) 1827{ 1828 vm_offset_t daddr, eva; 1829 int i; 1830 1831 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || 1832 (cpu_feature & CPUID_CLFSH) == 0) 1833 pmap_invalidate_cache(); 1834 else { 1835 mfence(); 1836 for (i = 0; i < count; i++) { 1837 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i])); 1838 eva = daddr + PAGE_SIZE; 1839 for (; daddr < eva; daddr += cpu_clflush_line_size) 1840 clflush(daddr); 1841 } 1842 mfence(); 1843 } 1844} 1845 1846/* 1847 * Routine: pmap_extract 1848 * Function: 1849 * Extract the physical page address associated 1850 * with the given map/virtual_address pair. 1851 */ 1852vm_paddr_t 1853pmap_extract(pmap_t pmap, vm_offset_t va) 1854{ 1855 pdp_entry_t *pdpe; 1856 pd_entry_t *pde; 1857 pt_entry_t *pte, PG_V; 1858 vm_paddr_t pa; 1859 1860 pa = 0; 1861 PG_V = pmap_valid_bit(pmap); 1862 PMAP_LOCK(pmap); 1863 pdpe = pmap_pdpe(pmap, va); 1864 if (pdpe != NULL && (*pdpe & PG_V) != 0) { 1865 if ((*pdpe & PG_PS) != 0) 1866 pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK); 1867 else { 1868 pde = pmap_pdpe_to_pde(pdpe, va); 1869 if ((*pde & PG_V) != 0) { 1870 if ((*pde & PG_PS) != 0) { 1871 pa = (*pde & PG_PS_FRAME) | 1872 (va & PDRMASK); 1873 } else { 1874 pte = pmap_pde_to_pte(pde, va); 1875 pa = (*pte & PG_FRAME) | 1876 (va & PAGE_MASK); 1877 } 1878 } 1879 } 1880 } 1881 PMAP_UNLOCK(pmap); 1882 return (pa); 1883} 1884 1885/* 1886 * Routine: pmap_extract_and_hold 1887 * Function: 1888 * Atomically extract and hold the physical page 1889 * with the given pmap and virtual address pair 1890 * if that mapping permits the given protection. 
1891 */ 1892vm_page_t 1893pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1894{ 1895 pd_entry_t pde, *pdep; 1896 pt_entry_t pte, PG_RW, PG_V; 1897 vm_paddr_t pa; 1898 vm_page_t m; 1899 1900 pa = 0; 1901 m = NULL; 1902 PG_RW = pmap_rw_bit(pmap); 1903 PG_V = pmap_valid_bit(pmap); 1904 PMAP_LOCK(pmap); 1905retry: 1906 pdep = pmap_pde(pmap, va); 1907 if (pdep != NULL && (pde = *pdep)) { 1908 if (pde & PG_PS) { 1909 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1910 if (vm_page_pa_tryrelock(pmap, (pde & 1911 PG_PS_FRAME) | (va & PDRMASK), &pa)) 1912 goto retry; 1913 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1914 (va & PDRMASK)); 1915 vm_page_hold(m); 1916 } 1917 } else { 1918 pte = *pmap_pde_to_pte(pdep, va); 1919 if ((pte & PG_V) && 1920 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1921 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, 1922 &pa)) 1923 goto retry; 1924 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1925 vm_page_hold(m); 1926 } 1927 } 1928 } 1929 PA_UNLOCK_COND(pa); 1930 PMAP_UNLOCK(pmap); 1931 return (m); 1932} 1933 1934vm_paddr_t 1935pmap_kextract(vm_offset_t va) 1936{ 1937 pd_entry_t pde; 1938 vm_paddr_t pa; 1939 1940 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { 1941 pa = DMAP_TO_PHYS(va); 1942 } else { 1943 pde = *vtopde(va); 1944 if (pde & PG_PS) { 1945 pa = (pde & PG_PS_FRAME) | (va & PDRMASK); 1946 } else { 1947 /* 1948 * Beware of a concurrent promotion that changes the 1949 * PDE at this point! For example, vtopte() must not 1950 * be used to access the PTE because it would use the 1951 * new PDE. It is, however, safe to use the old PDE 1952 * because the page table page is preserved by the 1953 * promotion. 1954 */ 1955 pa = *pmap_pde_to_pte(&pde, va); 1956 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1957 } 1958 } 1959 return (pa); 1960} 1961 1962/*************************************************** 1963 * Low level mapping routines..... 1964 ***************************************************/ 1965 1966/* 1967 * Add a wired page to the kva. 1968 * Note: not SMP coherent. 1969 */ 1970PMAP_INLINE void 1971pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1972{ 1973 pt_entry_t *pte; 1974 1975 pte = vtopte(va); 1976 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G); 1977} 1978 1979static __inline void 1980pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1981{ 1982 pt_entry_t *pte; 1983 int cache_bits; 1984 1985 pte = vtopte(va); 1986 cache_bits = pmap_cache_bits(kernel_pmap, mode, 0); 1987 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G | cache_bits); 1988} 1989 1990/* 1991 * Remove a page from the kernel pagetables. 1992 * Note: not SMP coherent. 1993 */ 1994PMAP_INLINE void 1995pmap_kremove(vm_offset_t va) 1996{ 1997 pt_entry_t *pte; 1998 1999 pte = vtopte(va); 2000 pte_clear(pte); 2001} 2002 2003/* 2004 * Used to map a range of physical addresses into kernel 2005 * virtual address space. 2006 * 2007 * The value passed in '*virt' is a suggested virtual address for 2008 * the mapping. Architectures which can support a direct-mapped 2009 * physical to virtual region can return the appropriate address 2010 * within that region, leaving '*virt' unchanged. Other 2011 * architectures should map the pages starting at '*virt' and 2012 * update '*virt' with the first usable address after the mapped 2013 * region. 
2014 */ 2015vm_offset_t 2016pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 2017{ 2018 return PHYS_TO_DMAP(start); 2019} 2020 2021 2022/* 2023 * Add a list of wired pages to the kva 2024 * this routine is only used for temporary 2025 * kernel mappings that do not need to have 2026 * page modification or references recorded. 2027 * Note that old mappings are simply written 2028 * over. The page *must* be wired. 2029 * Note: SMP coherent. Uses a ranged shootdown IPI. 2030 */ 2031void 2032pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 2033{ 2034 pt_entry_t *endpte, oldpte, pa, *pte; 2035 vm_page_t m; 2036 int cache_bits; 2037 2038 oldpte = 0; 2039 pte = vtopte(sva); 2040 endpte = pte + count; 2041 while (pte < endpte) { 2042 m = *ma++; 2043 cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 2044 pa = VM_PAGE_TO_PHYS(m) | cache_bits; 2045 if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) { 2046 oldpte |= *pte; 2047 pte_store(pte, pa | X86_PG_G | X86_PG_RW | X86_PG_V); 2048 } 2049 pte++; 2050 } 2051 if (__predict_false((oldpte & X86_PG_V) != 0)) 2052 pmap_invalidate_range(kernel_pmap, sva, sva + count * 2053 PAGE_SIZE); 2054} 2055 2056/* 2057 * This routine tears out page mappings from the 2058 * kernel -- it is meant only for temporary mappings. 2059 * Note: SMP coherent. Uses a ranged shootdown IPI. 2060 */ 2061void 2062pmap_qremove(vm_offset_t sva, int count) 2063{ 2064 vm_offset_t va; 2065 2066 va = sva; 2067 while (count-- > 0) { 2068 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va)); 2069 pmap_kremove(va); 2070 va += PAGE_SIZE; 2071 } 2072 pmap_invalidate_range(kernel_pmap, sva, va); 2073} 2074 2075/*************************************************** 2076 * Page table page management routines..... 2077 ***************************************************/ 2078static __inline void 2079pmap_free_zero_pages(struct spglist *free) 2080{ 2081 vm_page_t m; 2082 2083 while ((m = SLIST_FIRST(free)) != NULL) { 2084 SLIST_REMOVE_HEAD(free, plinks.s.ss); 2085 /* Preserve the page's PG_ZERO setting. */ 2086 vm_page_free_toq(m); 2087 } 2088} 2089 2090/* 2091 * Schedule the specified unused page table page to be freed. Specifically, 2092 * add the page to the specified list of pages that will be released to the 2093 * physical memory manager after the TLB has been updated. 2094 */ 2095static __inline void 2096pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 2097 boolean_t set_PG_ZERO) 2098{ 2099 2100 if (set_PG_ZERO) 2101 m->flags |= PG_ZERO; 2102 else 2103 m->flags &= ~PG_ZERO; 2104 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2105} 2106 2107/* 2108 * Inserts the specified page table page into the specified pmap's collection 2109 * of idle page table pages. Each of a pmap's page table pages is responsible 2110 * for mapping a distinct range of virtual addresses. The pmap's collection is 2111 * ordered by this virtual address range. 2112 */ 2113static __inline int 2114pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) 2115{ 2116 2117 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2118 return (vm_radix_insert(&pmap->pm_root, mpte)); 2119} 2120 2121/* 2122 * Looks for a page table page mapping the specified virtual address in the 2123 * specified pmap's collection of idle page table pages. Returns NULL if there 2124 * is no page table page corresponding to the specified virtual address. 
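 *
 * The collection is the radix tree rooted at pm_root, keyed by each
 * page table page's pindex; the lookup below recomputes that key from
 * the address as pmap_pde_pindex(va), i.e. the index of the 2MB
 * region that such a page table page would map.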
2125 */ 2126static __inline vm_page_t 2127pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va) 2128{ 2129 2130 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2131 return (vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va))); 2132} 2133 2134/* 2135 * Removes the specified page table page from the specified pmap's collection 2136 * of idle page table pages. The specified page table page must be a member of 2137 * the pmap's collection. 2138 */ 2139static __inline void 2140pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte) 2141{ 2142 2143 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2144 vm_radix_remove(&pmap->pm_root, mpte->pindex); 2145} 2146 2147/* 2148 * Decrements a page table page's wire count, which is used to record the 2149 * number of valid page table entries within the page. If the wire count 2150 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2151 * page table page was unmapped and FALSE otherwise. 2152 */ 2153static inline boolean_t 2154pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2155{ 2156 2157 --m->wire_count; 2158 if (m->wire_count == 0) { 2159 _pmap_unwire_ptp(pmap, va, m, free); 2160 return (TRUE); 2161 } else 2162 return (FALSE); 2163} 2164 2165static void 2166_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2167{ 2168 2169 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2170 /* 2171 * unmap the page table page 2172 */ 2173 if (m->pindex >= (NUPDE + NUPDPE)) { 2174 /* PDP page */ 2175 pml4_entry_t *pml4; 2176 pml4 = pmap_pml4e(pmap, va); 2177 *pml4 = 0; 2178 } else if (m->pindex >= NUPDE) { 2179 /* PD page */ 2180 pdp_entry_t *pdp; 2181 pdp = pmap_pdpe(pmap, va); 2182 *pdp = 0; 2183 } else { 2184 /* PTE page */ 2185 pd_entry_t *pd; 2186 pd = pmap_pde(pmap, va); 2187 *pd = 0; 2188 } 2189 pmap_resident_count_dec(pmap, 1); 2190 if (m->pindex < NUPDE) { 2191 /* We just released a PT, unhold the matching PD */ 2192 vm_page_t pdpg; 2193 2194 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME); 2195 pmap_unwire_ptp(pmap, va, pdpg, free); 2196 } 2197 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) { 2198 /* We just released a PD, unhold the matching PDP */ 2199 vm_page_t pdppg; 2200 2201 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME); 2202 pmap_unwire_ptp(pmap, va, pdppg, free); 2203 } 2204 2205 /* 2206 * This is a release store so that the ordinary store unmapping 2207 * the page table page is globally performed before TLB shoot- 2208 * down is begun. 2209 */ 2210 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 2211 2212 /* 2213 * Put page on a list so that it is released after 2214 * *ALL* TLB shootdown is done 2215 */ 2216 pmap_add_delayed_free_list(m, free, TRUE); 2217} 2218 2219/* 2220 * After removing a page table entry, this routine is used to 2221 * conditionally free the page, and manage the hold/wire counts. 
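 *
 * "ptepde" is the page directory entry that mapped the removed PTE;
 * kernel addresses are ignored because the kernel's page table pages
 * are not reference counted here.  Typical call site (sketch):
 *
 *	pmap_unuse_pt(pmap, va, *pde, &free);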
2222 */ 2223static int 2224pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, 2225 struct spglist *free) 2226{ 2227 vm_page_t mpte; 2228 2229 if (va >= VM_MAXUSER_ADDRESS) 2230 return (0); 2231 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); 2232 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 2233 return (pmap_unwire_ptp(pmap, va, mpte, free)); 2234} 2235 2236void 2237pmap_pinit0(pmap_t pmap) 2238{ 2239 2240 PMAP_LOCK_INIT(pmap); 2241 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys); 2242 pmap->pm_cr3 = KPML4phys; 2243 pmap->pm_root.rt_root = 0; 2244 CPU_ZERO(&pmap->pm_active); 2245 CPU_ZERO(&pmap->pm_save); 2246 PCPU_SET(curpmap, pmap); 2247 TAILQ_INIT(&pmap->pm_pvchunk); 2248 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2249 pmap->pm_pcid = pmap_pcid_enabled ? 0 : -1; 2250 pmap->pm_flags = pmap_flags; 2251} 2252 2253/* 2254 * Initialize a preallocated and zeroed pmap structure, 2255 * such as one in a vmspace structure. 2256 */ 2257int 2258pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) 2259{ 2260 vm_page_t pml4pg; 2261 vm_paddr_t pml4phys; 2262 int i; 2263 2264 /* 2265 * allocate the page directory page 2266 */ 2267 while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2268 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) 2269 VM_WAIT; 2270 2271 pml4phys = VM_PAGE_TO_PHYS(pml4pg); 2272 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys); 2273 pmap->pm_pcid = -1; 2274 pmap->pm_cr3 = ~0; /* initialize to an invalid value */ 2275 2276 if ((pml4pg->flags & PG_ZERO) == 0) 2277 pagezero(pmap->pm_pml4); 2278 2279 /* 2280 * Do not install the host kernel mappings in the nested page 2281 * tables. These mappings are meaningless in the guest physical 2282 * address space. 2283 */ 2284 if ((pmap->pm_type = pm_type) == PT_X86) { 2285 pmap->pm_cr3 = pml4phys; 2286 2287 /* Wire in kernel global address entries. */ 2288 for (i = 0; i < NKPML4E; i++) { 2289 pmap->pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | 2290 X86_PG_RW | X86_PG_V | PG_U; 2291 } 2292 for (i = 0; i < ndmpdpphys; i++) { 2293 pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | 2294 X86_PG_RW | X86_PG_V | PG_U; 2295 } 2296 2297 /* install self-referential address mapping entry(s) */ 2298 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | 2299 X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M; 2300 2301 if (pmap_pcid_enabled) { 2302 pmap->pm_pcid = alloc_unr(&pcid_unr); 2303 if (pmap->pm_pcid != -1) 2304 pmap->pm_cr3 |= pmap->pm_pcid; 2305 } 2306 } 2307 2308 pmap->pm_root.rt_root = 0; 2309 CPU_ZERO(&pmap->pm_active); 2310 TAILQ_INIT(&pmap->pm_pvchunk); 2311 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2312 pmap->pm_flags = flags; 2313 pmap->pm_eptgen = 0; 2314 CPU_ZERO(&pmap->pm_save); 2315 2316 return (1); 2317} 2318 2319int 2320pmap_pinit(pmap_t pmap) 2321{ 2322 2323 return (pmap_pinit_type(pmap, PT_X86, pmap_flags)); 2324} 2325 2326/* 2327 * This routine is called if the desired page table page does not exist. 2328 * 2329 * If page table page allocation fails, this routine may sleep before 2330 * returning NULL. It sleeps only if a lock pointer was given. 2331 * 2332 * Note: If a page allocation fails at page table level two or three, 2333 * one or two pages may be held during the wait, only to be released 2334 * afterwards. This conservative approach is easily argued to avoid 2335 * race conditions. 
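 *
 * The "ptepindex" argument selects both the level and the slot of the
 * page table page being allocated; the branches below decode it as:
 *
 *	[0, NUPDE)			a page table (PT) page
 *	[NUPDE, NUPDE + NUPDPE)		a page directory (PD) page
 *	[NUPDE + NUPDPE, ...)		a page directory pointer (PDP) page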
2336 */ 2337static vm_page_t 2338_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) 2339{ 2340 vm_page_t m, pdppg, pdpg; 2341 pt_entry_t PG_A, PG_M, PG_RW, PG_V; 2342 2343 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2344 2345 PG_A = pmap_accessed_bit(pmap); 2346 PG_M = pmap_modified_bit(pmap); 2347 PG_V = pmap_valid_bit(pmap); 2348 PG_RW = pmap_rw_bit(pmap); 2349 2350 /* 2351 * Allocate a page table page. 2352 */ 2353 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 2354 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2355 if (lockp != NULL) { 2356 RELEASE_PV_LIST_LOCK(lockp); 2357 PMAP_UNLOCK(pmap); 2358 rw_runlock(&pvh_global_lock); 2359 VM_WAIT; 2360 rw_rlock(&pvh_global_lock); 2361 PMAP_LOCK(pmap); 2362 } 2363 2364 /* 2365 * Indicate the need to retry. While waiting, the page table 2366 * page may have been allocated. 2367 */ 2368 return (NULL); 2369 } 2370 if ((m->flags & PG_ZERO) == 0) 2371 pmap_zero_page(m); 2372 2373 /* 2374 * Map the pagetable page into the process address space, if 2375 * it isn't already there. 2376 */ 2377 2378 if (ptepindex >= (NUPDE + NUPDPE)) { 2379 pml4_entry_t *pml4; 2380 vm_pindex_t pml4index; 2381 2382 /* Wire up a new PDPE page */ 2383 pml4index = ptepindex - (NUPDE + NUPDPE); 2384 pml4 = &pmap->pm_pml4[pml4index]; 2385 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; 2386 2387 } else if (ptepindex >= NUPDE) { 2388 vm_pindex_t pml4index; 2389 vm_pindex_t pdpindex; 2390 pml4_entry_t *pml4; 2391 pdp_entry_t *pdp; 2392 2393 /* Wire up a new PDE page */ 2394 pdpindex = ptepindex - NUPDE; 2395 pml4index = pdpindex >> NPML4EPGSHIFT; 2396 2397 pml4 = &pmap->pm_pml4[pml4index]; 2398 if ((*pml4 & PG_V) == 0) { 2399 /* Have to allocate a new pdp, recurse */ 2400 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, 2401 lockp) == NULL) { 2402 --m->wire_count; 2403 atomic_subtract_int(&cnt.v_wire_count, 1); 2404 vm_page_free_zero(m); 2405 return (NULL); 2406 } 2407 } else { 2408 /* Add reference to pdp page */ 2409 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME); 2410 pdppg->wire_count++; 2411 } 2412 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); 2413 2414 /* Now find the pdp page */ 2415 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; 2416 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; 2417 2418 } else { 2419 vm_pindex_t pml4index; 2420 vm_pindex_t pdpindex; 2421 pml4_entry_t *pml4; 2422 pdp_entry_t *pdp; 2423 pd_entry_t *pd; 2424 2425 /* Wire up a new PTE page */ 2426 pdpindex = ptepindex >> NPDPEPGSHIFT; 2427 pml4index = pdpindex >> NPML4EPGSHIFT; 2428 2429 /* First, find the pdp and check that its valid. 
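 * (That is: walk pml4 -> pdp for this index, recursing to allocate a
 * missing page directory page before the new page table page can be
 * installed below.)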
*/ 2430 pml4 = &pmap->pm_pml4[pml4index]; 2431 if ((*pml4 & PG_V) == 0) { 2432 /* Have to allocate a new pd, recurse */ 2433 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 2434 lockp) == NULL) { 2435 --m->wire_count; 2436 atomic_subtract_int(&cnt.v_wire_count, 1); 2437 vm_page_free_zero(m); 2438 return (NULL); 2439 } 2440 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); 2441 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; 2442 } else { 2443 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); 2444 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; 2445 if ((*pdp & PG_V) == 0) { 2446 /* Have to allocate a new pd, recurse */ 2447 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 2448 lockp) == NULL) { 2449 --m->wire_count; 2450 atomic_subtract_int(&cnt.v_wire_count, 2451 1); 2452 vm_page_free_zero(m); 2453 return (NULL); 2454 } 2455 } else { 2456 /* Add reference to the pd page */ 2457 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME); 2458 pdpg->wire_count++; 2459 } 2460 } 2461 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME); 2462 2463 /* Now we know where the page directory page is */ 2464 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)]; 2465 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; 2466 } 2467 2468 pmap_resident_count_inc(pmap, 1); 2469 2470 return (m); 2471} 2472 2473static vm_page_t 2474pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 2475{ 2476 vm_pindex_t pdpindex, ptepindex; 2477 pdp_entry_t *pdpe, PG_V; 2478 vm_page_t pdpg; 2479 2480 PG_V = pmap_valid_bit(pmap); 2481 2482retry: 2483 pdpe = pmap_pdpe(pmap, va); 2484 if (pdpe != NULL && (*pdpe & PG_V) != 0) { 2485 /* Add a reference to the pd page. */ 2486 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME); 2487 pdpg->wire_count++; 2488 } else { 2489 /* Allocate a pd page. */ 2490 ptepindex = pmap_pde_pindex(va); 2491 pdpindex = ptepindex >> NPDPEPGSHIFT; 2492 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp); 2493 if (pdpg == NULL && lockp != NULL) 2494 goto retry; 2495 } 2496 return (pdpg); 2497} 2498 2499static vm_page_t 2500pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 2501{ 2502 vm_pindex_t ptepindex; 2503 pd_entry_t *pd, PG_V; 2504 vm_page_t m; 2505 2506 PG_V = pmap_valid_bit(pmap); 2507 2508 /* 2509 * Calculate pagetable page index 2510 */ 2511 ptepindex = pmap_pde_pindex(va); 2512retry: 2513 /* 2514 * Get the page directory entry 2515 */ 2516 pd = pmap_pde(pmap, va); 2517 2518 /* 2519 * This supports switching from a 2MB page to a 2520 * normal 4K page. 2521 */ 2522 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) { 2523 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) { 2524 /* 2525 * Invalidation of the 2MB page mapping may have caused 2526 * the deallocation of the underlying PD page. 2527 */ 2528 pd = NULL; 2529 } 2530 } 2531 2532 /* 2533 * If the page table page is mapped, we just increment the 2534 * hold count, and activate it. 2535 */ 2536 if (pd != NULL && (*pd & PG_V) != 0) { 2537 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME); 2538 m->wire_count++; 2539 } else { 2540 /* 2541 * Here if the pte page isn't mapped, or if it has been 2542 * deallocated. 2543 */ 2544 m = _pmap_allocpte(pmap, ptepindex, lockp); 2545 if (m == NULL && lockp != NULL) 2546 goto retry; 2547 } 2548 return (m); 2549} 2550 2551 2552/*************************************************** 2553 * Pmap allocation/deallocation routines. 2554 ***************************************************/ 2555 2556/* 2557 * Release any resources held by the given physical map. 
2558 * Called when a pmap initialized by pmap_pinit is being released. 2559 * Should only be called if the map contains no valid mappings. 2560 */ 2561void 2562pmap_release(pmap_t pmap) 2563{ 2564 vm_page_t m; 2565 int i; 2566 2567 KASSERT(pmap->pm_stats.resident_count == 0, 2568 ("pmap_release: pmap resident count %ld != 0", 2569 pmap->pm_stats.resident_count)); 2570 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2571 ("pmap_release: pmap has reserved page table page(s)")); 2572 2573 if (pmap_pcid_enabled) { 2574 /* 2575 * Invalidate any left TLB entries, to allow the reuse 2576 * of the pcid. 2577 */ 2578 pmap_invalidate_all(pmap); 2579 } 2580 2581 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4)); 2582 2583 for (i = 0; i < NKPML4E; i++) /* KVA */ 2584 pmap->pm_pml4[KPML4BASE + i] = 0; 2585 for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ 2586 pmap->pm_pml4[DMPML4I + i] = 0; 2587 pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ 2588 2589 m->wire_count--; 2590 atomic_subtract_int(&cnt.v_wire_count, 1); 2591 vm_page_free_zero(m); 2592 if (pmap->pm_pcid != -1) 2593 free_unr(&pcid_unr, pmap->pm_pcid); 2594} 2595 2596static int 2597kvm_size(SYSCTL_HANDLER_ARGS) 2598{ 2599 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; 2600 2601 return sysctl_handle_long(oidp, &ksize, 0, req); 2602} 2603SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 2604 0, 0, kvm_size, "LU", "Size of KVM"); 2605 2606static int 2607kvm_free(SYSCTL_HANDLER_ARGS) 2608{ 2609 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 2610 2611 return sysctl_handle_long(oidp, &kfree, 0, req); 2612} 2613SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 2614 0, 0, kvm_free, "LU", "Amount of KVM free"); 2615 2616/* 2617 * grow the number of kernel page table entries, if needed 2618 */ 2619void 2620pmap_growkernel(vm_offset_t addr) 2621{ 2622 vm_paddr_t paddr; 2623 vm_page_t nkpg; 2624 pd_entry_t *pde, newpdir; 2625 pdp_entry_t *pdpe; 2626 2627 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2628 2629 /* 2630 * Return if "addr" is within the range of kernel page table pages 2631 * that were preallocated during pmap bootstrap. Moreover, leave 2632 * "kernel_vm_end" and the kernel page table as they were. 2633 * 2634 * The correctness of this action is based on the following 2635 * argument: vm_map_insert() allocates contiguous ranges of the 2636 * kernel virtual address space. It calls this function if a range 2637 * ends after "kernel_vm_end". If the kernel is mapped between 2638 * "kernel_vm_end" and "addr", then the range cannot begin at 2639 * "kernel_vm_end". In fact, its beginning address cannot be less 2640 * than the kernel. Thus, there is no immediate need to allocate 2641 * any new kernel page table pages between "kernel_vm_end" and 2642 * "KERNBASE". 
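 *
 * Concretely, the early return below fires only while "addr" still
 * lies within the nkpt page table pages preallocated during pmap
 * bootstrap, i.e. while KERNBASE < addr <= KERNBASE + nkpt * NBPDR.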
2643 */ 2644 if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR) 2645 return; 2646 2647 addr = roundup2(addr, NBPDR); 2648 if (addr - 1 >= kernel_map->max_offset) 2649 addr = kernel_map->max_offset; 2650 while (kernel_vm_end < addr) { 2651 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end); 2652 if ((*pdpe & X86_PG_V) == 0) { 2653 /* We need a new PDP entry */ 2654 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT, 2655 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 2656 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2657 if (nkpg == NULL) 2658 panic("pmap_growkernel: no memory to grow kernel"); 2659 if ((nkpg->flags & PG_ZERO) == 0) 2660 pmap_zero_page(nkpg); 2661 paddr = VM_PAGE_TO_PHYS(nkpg); 2662 *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW | 2663 X86_PG_A | X86_PG_M); 2664 continue; /* try again */ 2665 } 2666 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); 2667 if ((*pde & X86_PG_V) != 0) { 2668 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2669 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 2670 kernel_vm_end = kernel_map->max_offset; 2671 break; 2672 } 2673 continue; 2674 } 2675 2676 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end), 2677 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 2678 VM_ALLOC_ZERO); 2679 if (nkpg == NULL) 2680 panic("pmap_growkernel: no memory to grow kernel"); 2681 if ((nkpg->flags & PG_ZERO) == 0) 2682 pmap_zero_page(nkpg); 2683 paddr = VM_PAGE_TO_PHYS(nkpg); 2684 newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M; 2685 pde_store(pde, newpdir); 2686 2687 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2688 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 2689 kernel_vm_end = kernel_map->max_offset; 2690 break; 2691 } 2692 } 2693} 2694 2695 2696/*************************************************** 2697 * page management routines. 
2698 ***************************************************/ 2699 2700CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2701CTASSERT(_NPCM == 3); 2702CTASSERT(_NPCPV == 168); 2703 2704static __inline struct pv_chunk * 2705pv_to_chunk(pv_entry_t pv) 2706{ 2707 2708 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2709} 2710 2711#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2712 2713#define PC_FREE0 0xfffffffffffffffful 2714#define PC_FREE1 0xfffffffffffffffful 2715#define PC_FREE2 0x000000fffffffffful 2716 2717static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; 2718 2719#ifdef PV_STATS 2720static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2721 2722SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2723 "Current number of pv entry chunks"); 2724SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2725 "Current number of pv entry chunks allocated"); 2726SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2727 "Current number of pv entry chunks frees"); 2728SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 2729 "Number of times tried to get a chunk page but failed."); 2730 2731static long pv_entry_frees, pv_entry_allocs, pv_entry_count; 2732static int pv_entry_spare; 2733 2734SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2735 "Current number of pv entry frees"); 2736SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 2737 "Current number of pv entry allocs"); 2738SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2739 "Current number of pv entries"); 2740SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2741 "Current number of spare pv entries"); 2742#endif 2743 2744/* 2745 * We are in a serious low memory condition. Resort to 2746 * drastic measures to free some pages so we can allocate 2747 * another pv entry chunk. 2748 * 2749 * Returns NULL if PV entries were reclaimed from the specified pmap. 2750 * 2751 * We do not, however, unmap 2mpages because subsequent accesses will 2752 * allocate per-page pv entries until repromotion occurs, thereby 2753 * exacerbating the shortage of free pv entries. 2754 */ 2755static vm_page_t 2756reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) 2757{ 2758 struct pch new_tail; 2759 struct pv_chunk *pc; 2760 struct md_page *pvh; 2761 pd_entry_t *pde; 2762 pmap_t pmap; 2763 pt_entry_t *pte, tpte; 2764 pt_entry_t PG_G, PG_A, PG_M, PG_RW; 2765 pv_entry_t pv; 2766 vm_offset_t va; 2767 vm_page_t m, m_pc; 2768 struct spglist free; 2769 uint64_t inuse; 2770 int bit, field, freed; 2771 2772 rw_assert(&pvh_global_lock, RA_LOCKED); 2773 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2774 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); 2775 pmap = NULL; 2776 m_pc = NULL; 2777 PG_G = PG_A = PG_M = PG_RW = 0; 2778 SLIST_INIT(&free); 2779 TAILQ_INIT(&new_tail); 2780 mtx_lock(&pv_chunks_mutex); 2781 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) { 2782 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2783 mtx_unlock(&pv_chunks_mutex); 2784 if (pmap != pc->pc_pmap) { 2785 if (pmap != NULL) { 2786 pmap_invalidate_all(pmap); 2787 if (pmap != locked_pmap) 2788 PMAP_UNLOCK(pmap); 2789 } 2790 pmap = pc->pc_pmap; 2791 /* Avoid deadlock and lock recursion. 
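 * Locks are taken in ascending pmap address order: a pmap that sorts
 * after the caller's locked_pmap may be locked unconditionally, any
 * other pmap is only try-locked, and locked_pmap itself, which is
 * already held, is never re-acquired.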
*/ 2792 if (pmap > locked_pmap) { 2793 RELEASE_PV_LIST_LOCK(lockp); 2794 PMAP_LOCK(pmap); 2795 } else if (pmap != locked_pmap && 2796 !PMAP_TRYLOCK(pmap)) { 2797 pmap = NULL; 2798 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 2799 mtx_lock(&pv_chunks_mutex); 2800 continue; 2801 } 2802 PG_G = pmap_global_bit(pmap); 2803 PG_A = pmap_accessed_bit(pmap); 2804 PG_M = pmap_modified_bit(pmap); 2805 PG_RW = pmap_rw_bit(pmap); 2806 } 2807 2808 /* 2809 * Destroy every non-wired, 4 KB page mapping in the chunk. 2810 */ 2811 freed = 0; 2812 for (field = 0; field < _NPCM; field++) { 2813 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2814 inuse != 0; inuse &= ~(1UL << bit)) { 2815 bit = bsfq(inuse); 2816 pv = &pc->pc_pventry[field * 64 + bit]; 2817 va = pv->pv_va; 2818 pde = pmap_pde(pmap, va); 2819 if ((*pde & PG_PS) != 0) 2820 continue; 2821 pte = pmap_pde_to_pte(pde, va); 2822 if ((*pte & PG_W) != 0) 2823 continue; 2824 tpte = pte_load_clear(pte); 2825 if ((tpte & PG_G) != 0) 2826 pmap_invalidate_page(pmap, va); 2827 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2828 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2829 vm_page_dirty(m); 2830 if ((tpte & PG_A) != 0) 2831 vm_page_aflag_set(m, PGA_REFERENCED); 2832 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 2833 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2834 m->md.pv_gen++; 2835 if (TAILQ_EMPTY(&m->md.pv_list) && 2836 (m->flags & PG_FICTITIOUS) == 0) { 2837 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2838 if (TAILQ_EMPTY(&pvh->pv_list)) { 2839 vm_page_aflag_clear(m, 2840 PGA_WRITEABLE); 2841 } 2842 } 2843 pc->pc_map[field] |= 1UL << bit; 2844 pmap_unuse_pt(pmap, va, *pde, &free); 2845 freed++; 2846 } 2847 } 2848 if (freed == 0) { 2849 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 2850 mtx_lock(&pv_chunks_mutex); 2851 continue; 2852 } 2853 /* Every freed mapping is for a 4 KB page. */ 2854 pmap_resident_count_dec(pmap, freed); 2855 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 2856 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 2857 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 2858 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2859 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 && 2860 pc->pc_map[2] == PC_FREE2) { 2861 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 2862 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 2863 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 2864 /* Entire chunk is free; return it. */ 2865 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 2866 dump_drop_page(m_pc->phys_addr); 2867 mtx_lock(&pv_chunks_mutex); 2868 break; 2869 } 2870 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2871 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 2872 mtx_lock(&pv_chunks_mutex); 2873 /* One freed pv entry in locked_pmap is sufficient. */ 2874 if (pmap == locked_pmap) 2875 break; 2876 } 2877 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); 2878 mtx_unlock(&pv_chunks_mutex); 2879 if (pmap != NULL) { 2880 pmap_invalidate_all(pmap); 2881 if (pmap != locked_pmap) 2882 PMAP_UNLOCK(pmap); 2883 } 2884 if (m_pc == NULL && !SLIST_EMPTY(&free)) { 2885 m_pc = SLIST_FIRST(&free); 2886 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2887 /* Recycle a freed page table page. 
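 * It is re-wired here and handed back to the caller, which turns it
 * into the new pv chunk page instead of asking the page allocator.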
*/ 2888 m_pc->wire_count = 1; 2889 atomic_add_int(&cnt.v_wire_count, 1); 2890 } 2891 pmap_free_zero_pages(&free); 2892 return (m_pc); 2893} 2894 2895/* 2896 * free the pv_entry back to the free list 2897 */ 2898static void 2899free_pv_entry(pmap_t pmap, pv_entry_t pv) 2900{ 2901 struct pv_chunk *pc; 2902 int idx, field, bit; 2903 2904 rw_assert(&pvh_global_lock, RA_LOCKED); 2905 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2906 PV_STAT(atomic_add_long(&pv_entry_frees, 1)); 2907 PV_STAT(atomic_add_int(&pv_entry_spare, 1)); 2908 PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); 2909 pc = pv_to_chunk(pv); 2910 idx = pv - &pc->pc_pventry[0]; 2911 field = idx / 64; 2912 bit = idx % 64; 2913 pc->pc_map[field] |= 1ul << bit; 2914 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 || 2915 pc->pc_map[2] != PC_FREE2) { 2916 /* 98% of the time, pc is already at the head of the list. */ 2917 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { 2918 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2919 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2920 } 2921 return; 2922 } 2923 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2924 free_pv_chunk(pc); 2925} 2926 2927static void 2928free_pv_chunk(struct pv_chunk *pc) 2929{ 2930 vm_page_t m; 2931 2932 mtx_lock(&pv_chunks_mutex); 2933 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2934 mtx_unlock(&pv_chunks_mutex); 2935 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 2936 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 2937 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 2938 /* entire chunk is free, return it */ 2939 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 2940 dump_drop_page(m->phys_addr); 2941 vm_page_unwire(m, 0); 2942 vm_page_free(m); 2943} 2944 2945/* 2946 * Returns a new PV entry, allocating a new PV chunk from the system when 2947 * needed. If this PV chunk allocation fails and a PV list lock pointer was 2948 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is 2949 * returned. 2950 * 2951 * The given PV list lock may be released. 
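 *
 * Each chunk page holds _NPCPV (168) entries whose availability is
 * tracked by the three pc_map words: 64 + 64 + 40 usable bits, which
 * is why PC_FREE2 masks only the low 40 bits.  A typical caller
 * (compare pmap_try_insert_pv_entry() below; lock juggling elided)
 * looks like:
 *
 *	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
 *		pv->pv_va = va;
 *		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 *	}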
2952 */ 2953static pv_entry_t 2954get_pv_entry(pmap_t pmap, struct rwlock **lockp) 2955{ 2956 int bit, field; 2957 pv_entry_t pv; 2958 struct pv_chunk *pc; 2959 vm_page_t m; 2960 2961 rw_assert(&pvh_global_lock, RA_LOCKED); 2962 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2963 PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); 2964retry: 2965 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2966 if (pc != NULL) { 2967 for (field = 0; field < _NPCM; field++) { 2968 if (pc->pc_map[field]) { 2969 bit = bsfq(pc->pc_map[field]); 2970 break; 2971 } 2972 } 2973 if (field < _NPCM) { 2974 pv = &pc->pc_pventry[field * 64 + bit]; 2975 pc->pc_map[field] &= ~(1ul << bit); 2976 /* If this was the last item, move it to tail */ 2977 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && 2978 pc->pc_map[2] == 0) { 2979 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2980 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, 2981 pc_list); 2982 } 2983 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 2984 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); 2985 return (pv); 2986 } 2987 } 2988 /* No free items, allocate another chunk */ 2989 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 2990 VM_ALLOC_WIRED); 2991 if (m == NULL) { 2992 if (lockp == NULL) { 2993 PV_STAT(pc_chunk_tryfail++); 2994 return (NULL); 2995 } 2996 m = reclaim_pv_chunk(pmap, lockp); 2997 if (m == NULL) 2998 goto retry; 2999 } 3000 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 3001 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 3002 dump_add_page(m->phys_addr); 3003 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 3004 pc->pc_pmap = pmap; 3005 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ 3006 pc->pc_map[1] = PC_FREE1; 3007 pc->pc_map[2] = PC_FREE2; 3008 mtx_lock(&pv_chunks_mutex); 3009 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3010 mtx_unlock(&pv_chunks_mutex); 3011 pv = &pc->pc_pventry[0]; 3012 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3013 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 3014 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); 3015 return (pv); 3016} 3017 3018/* 3019 * Returns the number of one bits within the given PV chunk map element. 3020 */ 3021static int 3022popcnt_pc_map_elem(uint64_t elem) 3023{ 3024 int count; 3025 3026 /* 3027 * This simple method of counting the one bits performs well because 3028 * the given element typically contains more zero bits than one bits. 3029 */ 3030 count = 0; 3031 for (; elem != 0; elem &= elem - 1) 3032 count++; 3033 return (count); 3034} 3035 3036/* 3037 * Ensure that the number of spare PV entries in the specified pmap meets or 3038 * exceeds the given count, "needed". 3039 * 3040 * The given PV list lock may be released. 3041 */ 3042static void 3043reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) 3044{ 3045 struct pch new_tail; 3046 struct pv_chunk *pc; 3047 int avail, free; 3048 vm_page_t m; 3049 3050 rw_assert(&pvh_global_lock, RA_LOCKED); 3051 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3052 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); 3053 3054 /* 3055 * Newly allocated PV chunks must be stored in a private list until 3056 * the required number of PV chunks have been allocated. Otherwise, 3057 * reclaim_pv_chunk() could recycle one of these chunks. In 3058 * contrast, these chunks must be added to the pmap upon allocation. 
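 *
 * ("Private list" refers to the global pv_chunks LRU only: each new
 * chunk is linked onto the pmap's pm_pvchunk list right away, but is
 * staged on "new_tail" and concatenated onto pv_chunks at the end,
 * so that reclaim_pv_chunk() cannot recycle it prematurely.)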
3059 */ 3060 TAILQ_INIT(&new_tail); 3061retry: 3062 avail = 0; 3063 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { 3064 if ((cpu_feature2 & CPUID2_POPCNT) == 0) { 3065 free = popcnt_pc_map_elem(pc->pc_map[0]); 3066 free += popcnt_pc_map_elem(pc->pc_map[1]); 3067 free += popcnt_pc_map_elem(pc->pc_map[2]); 3068 } else { 3069 free = popcntq(pc->pc_map[0]); 3070 free += popcntq(pc->pc_map[1]); 3071 free += popcntq(pc->pc_map[2]); 3072 } 3073 if (free == 0) 3074 break; 3075 avail += free; 3076 if (avail >= needed) 3077 break; 3078 } 3079 for (; avail < needed; avail += _NPCPV) { 3080 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 3081 VM_ALLOC_WIRED); 3082 if (m == NULL) { 3083 m = reclaim_pv_chunk(pmap, lockp); 3084 if (m == NULL) 3085 goto retry; 3086 } 3087 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 3088 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 3089 dump_add_page(m->phys_addr); 3090 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 3091 pc->pc_pmap = pmap; 3092 pc->pc_map[0] = PC_FREE0; 3093 pc->pc_map[1] = PC_FREE1; 3094 pc->pc_map[2] = PC_FREE2; 3095 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3096 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 3097 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); 3098 } 3099 if (!TAILQ_EMPTY(&new_tail)) { 3100 mtx_lock(&pv_chunks_mutex); 3101 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); 3102 mtx_unlock(&pv_chunks_mutex); 3103 } 3104} 3105 3106/* 3107 * First find and then remove the pv entry for the specified pmap and virtual 3108 * address from the specified pv list. Returns the pv entry if found and NULL 3109 * otherwise. This operation can be performed on pv lists for either 4KB or 3110 * 2MB page mappings. 3111 */ 3112static __inline pv_entry_t 3113pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3114{ 3115 pv_entry_t pv; 3116 3117 rw_assert(&pvh_global_lock, RA_LOCKED); 3118 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3119 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3120 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3121 pvh->pv_gen++; 3122 break; 3123 } 3124 } 3125 return (pv); 3126} 3127 3128/* 3129 * After demotion from a 2MB page mapping to 512 4KB page mappings, 3130 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv 3131 * entries for each of the 4KB page mappings. 3132 */ 3133static void 3134pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 3135 struct rwlock **lockp) 3136{ 3137 struct md_page *pvh; 3138 struct pv_chunk *pc; 3139 pv_entry_t pv; 3140 vm_offset_t va_last; 3141 vm_page_t m; 3142 int bit, field; 3143 3144 rw_assert(&pvh_global_lock, RA_LOCKED); 3145 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3146 KASSERT((pa & PDRMASK) == 0, 3147 ("pmap_pv_demote_pde: pa is not 2mpage aligned")); 3148 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 3149 3150 /* 3151 * Transfer the 2mpage's pv entry for this mapping to the first 3152 * page's pv list. Once this transfer begins, the pv list lock 3153 * must not be released until the last pv entry is reinstantiated. 3154 */ 3155 pvh = pa_to_pvh(pa); 3156 va = trunc_2mpage(va); 3157 pv = pmap_pvh_remove(pvh, pmap, va); 3158 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 3159 m = PHYS_TO_VM_PAGE(pa); 3160 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3161 m->md.pv_gen++; 3162 /* Instantiate the remaining NPTEPG - 1 pv entries. 
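 * The spares set aside by reserve_pv_entries() are consumed directly
 * from the pmap's pv chunks below, one free bit at a time, rather
 * than by calling get_pv_entry() for each 4KB page.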
*/ 3163 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1)); 3164 va_last = va + NBPDR - PAGE_SIZE; 3165 for (;;) { 3166 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 3167 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 || 3168 pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare")); 3169 for (field = 0; field < _NPCM; field++) { 3170 while (pc->pc_map[field]) { 3171 bit = bsfq(pc->pc_map[field]); 3172 pc->pc_map[field] &= ~(1ul << bit); 3173 pv = &pc->pc_pventry[field * 64 + bit]; 3174 va += PAGE_SIZE; 3175 pv->pv_va = va; 3176 m++; 3177 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3178 ("pmap_pv_demote_pde: page %p is not managed", m)); 3179 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3180 m->md.pv_gen++; 3181 if (va == va_last) 3182 goto out; 3183 } 3184 } 3185 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3186 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3187 } 3188out: 3189 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { 3190 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3191 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3192 } 3193 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1)); 3194 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1)); 3195} 3196 3197/* 3198 * After promotion from 512 4KB page mappings to a single 2MB page mapping, 3199 * replace the many pv entries for the 4KB page mappings by a single pv entry 3200 * for the 2MB page mapping. 3201 */ 3202static void 3203pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 3204 struct rwlock **lockp) 3205{ 3206 struct md_page *pvh; 3207 pv_entry_t pv; 3208 vm_offset_t va_last; 3209 vm_page_t m; 3210 3211 rw_assert(&pvh_global_lock, RA_LOCKED); 3212 KASSERT((pa & PDRMASK) == 0, 3213 ("pmap_pv_promote_pde: pa is not 2mpage aligned")); 3214 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 3215 3216 /* 3217 * Transfer the first page's pv entry for this mapping to the 2mpage's 3218 * pv list. Aside from avoiding the cost of a call to get_pv_entry(), 3219 * a transfer avoids the possibility that get_pv_entry() calls 3220 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the 3221 * mappings that is being promoted. 3222 */ 3223 m = PHYS_TO_VM_PAGE(pa); 3224 va = trunc_2mpage(va); 3225 pv = pmap_pvh_remove(&m->md, pmap, va); 3226 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 3227 pvh = pa_to_pvh(pa); 3228 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3229 pvh->pv_gen++; 3230 /* Free the remaining NPTEPG - 1 pv entries. */ 3231 va_last = va + NBPDR - PAGE_SIZE; 3232 do { 3233 m++; 3234 va += PAGE_SIZE; 3235 pmap_pvh_free(&m->md, pmap, va); 3236 } while (va < va_last); 3237} 3238 3239/* 3240 * First find and then destroy the pv entry for the specified pmap and virtual 3241 * address. This operation can be performed on pv lists for either 4KB or 2MB 3242 * page mappings. 3243 */ 3244static void 3245pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3246{ 3247 pv_entry_t pv; 3248 3249 pv = pmap_pvh_remove(pvh, pmap, va); 3250 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3251 free_pv_entry(pmap, pv); 3252} 3253 3254/* 3255 * Conditionally create the PV entry for a 4KB page mapping if the required 3256 * memory can be allocated without resorting to reclamation. 
3257 */ 3258static boolean_t 3259pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, 3260 struct rwlock **lockp) 3261{ 3262 pv_entry_t pv; 3263 3264 rw_assert(&pvh_global_lock, RA_LOCKED); 3265 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3266 /* Pass NULL instead of the lock pointer to disable reclamation. */ 3267 if ((pv = get_pv_entry(pmap, NULL)) != NULL) { 3268 pv->pv_va = va; 3269 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 3270 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3271 m->md.pv_gen++; 3272 return (TRUE); 3273 } else 3274 return (FALSE); 3275} 3276 3277/* 3278 * Conditionally create the PV entry for a 2MB page mapping if the required 3279 * memory can be allocated without resorting to reclamation. 3280 */ 3281static boolean_t 3282pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 3283 struct rwlock **lockp) 3284{ 3285 struct md_page *pvh; 3286 pv_entry_t pv; 3287 3288 rw_assert(&pvh_global_lock, RA_LOCKED); 3289 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3290 /* Pass NULL instead of the lock pointer to disable reclamation. */ 3291 if ((pv = get_pv_entry(pmap, NULL)) != NULL) { 3292 pv->pv_va = va; 3293 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 3294 pvh = pa_to_pvh(pa); 3295 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3296 pvh->pv_gen++; 3297 return (TRUE); 3298 } else 3299 return (FALSE); 3300} 3301 3302/* 3303 * Fills a page table page with mappings to consecutive physical pages. 3304 */ 3305static void 3306pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 3307{ 3308 pt_entry_t *pte; 3309 3310 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 3311 *pte = newpte; 3312 newpte += PAGE_SIZE; 3313 } 3314} 3315 3316/* 3317 * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page 3318 * mapping is invalidated. 3319 */ 3320static boolean_t 3321pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3322{ 3323 struct rwlock *lock; 3324 boolean_t rv; 3325 3326 lock = NULL; 3327 rv = pmap_demote_pde_locked(pmap, pde, va, &lock); 3328 if (lock != NULL) 3329 rw_wunlock(lock); 3330 return (rv); 3331} 3332 3333static boolean_t 3334pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, 3335 struct rwlock **lockp) 3336{ 3337 pd_entry_t newpde, oldpde; 3338 pt_entry_t *firstpte, newpte; 3339 pt_entry_t PG_A, PG_G, PG_M, PG_RW, PG_V; 3340 vm_paddr_t mptepa; 3341 vm_page_t mpte; 3342 struct spglist free; 3343 int PG_PTE_CACHE; 3344 3345 PG_G = pmap_global_bit(pmap); 3346 PG_A = pmap_accessed_bit(pmap); 3347 PG_M = pmap_modified_bit(pmap); 3348 PG_RW = pmap_rw_bit(pmap); 3349 PG_V = pmap_valid_bit(pmap); 3350 PG_PTE_CACHE = pmap_cache_mask(pmap, 0); 3351 3352 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3353 oldpde = *pde; 3354 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 3355 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 3356 if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) != 3357 NULL) 3358 pmap_remove_pt_page(pmap, mpte); 3359 else { 3360 KASSERT((oldpde & PG_W) == 0, 3361 ("pmap_demote_pde: page table page for a wired mapping" 3362 " is missing")); 3363 3364 /* 3365 * Invalidate the 2MB page mapping and return "failure" if the 3366 * mapping was never accessed or the allocation of the new 3367 * page table page fails. If the 2MB page mapping belongs to 3368 * the direct map region of the kernel's address space, then 3369 * the page allocation request specifies the highest possible 3370 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is 3371 * normal. 
Page table pages are preallocated for every other 3372 * part of the kernel address space, so the direct map region 3373 * is the only part of the kernel address space that must be 3374 * handled here. 3375 */ 3376 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, 3377 pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va < 3378 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | 3379 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 3380 SLIST_INIT(&free); 3381 pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free, 3382 lockp); 3383 pmap_invalidate_page(pmap, trunc_2mpage(va)); 3384 pmap_free_zero_pages(&free); 3385 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx" 3386 " in pmap %p", va, pmap); 3387 return (FALSE); 3388 } 3389 if (va < VM_MAXUSER_ADDRESS) 3390 pmap_resident_count_inc(pmap, 1); 3391 } 3392 mptepa = VM_PAGE_TO_PHYS(mpte); 3393 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); 3394 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 3395 KASSERT((oldpde & PG_A) != 0, 3396 ("pmap_demote_pde: oldpde is missing PG_A")); 3397 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 3398 ("pmap_demote_pde: oldpde is missing PG_M")); 3399 newpte = oldpde & ~PG_PS; 3400 newpte = pmap_swap_pat(pmap, newpte); 3401 3402 /* 3403 * If the page table page is new, initialize it. 3404 */ 3405 if (mpte->wire_count == 1) { 3406 mpte->wire_count = NPTEPG; 3407 pmap_fill_ptp(firstpte, newpte); 3408 } 3409 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 3410 ("pmap_demote_pde: firstpte and newpte map different physical" 3411 " addresses")); 3412 3413 /* 3414 * If the mapping has changed attributes, update the page table 3415 * entries. 3416 */ 3417 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 3418 pmap_fill_ptp(firstpte, newpte); 3419 3420 /* 3421 * The spare PV entries must be reserved prior to demoting the 3422 * mapping, that is, prior to changing the PDE. Otherwise, the state 3423 * of the PDE and the PV lists will be inconsistent, which can result 3424 * in reclaim_pv_chunk() attempting to remove a PV entry from the 3425 * wrong PV list and pmap_pv_demote_pde() failing to find the expected 3426 * PV entry for the 2MB page mapping that is being demoted. 3427 */ 3428 if ((oldpde & PG_MANAGED) != 0) 3429 reserve_pv_entries(pmap, NPTEPG - 1, lockp); 3430 3431 /* 3432 * Demote the mapping. This pmap is locked. The old PDE has 3433 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 3434 * set. Thus, there is no danger of a race with another 3435 * processor changing the setting of PG_A and/or PG_M between 3436 * the read above and the store below. 3437 */ 3438 if (workaround_erratum383) 3439 pmap_update_pde(pmap, va, pde, newpde); 3440 else 3441 pde_store(pde, newpde); 3442 3443 /* 3444 * Invalidate a stale recursive mapping of the page table page. 3445 */ 3446 if (va >= VM_MAXUSER_ADDRESS) 3447 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); 3448 3449 /* 3450 * Demote the PV entry. 3451 */ 3452 if ((oldpde & PG_MANAGED) != 0) 3453 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp); 3454 3455 atomic_add_long(&pmap_pde_demotions, 1); 3456 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx" 3457 " in pmap %p", va, pmap); 3458 return (TRUE); 3459} 3460 3461/* 3462 * pmap_remove_kernel_pde: Remove a kernel superpage mapping. 
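 * Unlike the user case, the underlying page table page is not freed:
 * the page saved at promotion time is looked up, zeroed, and installed
 * as a fresh PDE, leaving the 2MB range unmapped at 4KB granularity.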
3463 */ 3464static void 3465pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3466{ 3467 pd_entry_t newpde; 3468 vm_paddr_t mptepa; 3469 vm_page_t mpte; 3470 3471 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap)); 3472 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3473 mpte = pmap_lookup_pt_page(pmap, va); 3474 if (mpte == NULL) 3475 panic("pmap_remove_kernel_pde: Missing pt page."); 3476 3477 pmap_remove_pt_page(pmap, mpte); 3478 mptepa = VM_PAGE_TO_PHYS(mpte); 3479 newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V; 3480 3481 /* 3482 * Initialize the page table page. 3483 */ 3484 pagezero((void *)PHYS_TO_DMAP(mptepa)); 3485 3486 /* 3487 * Demote the mapping. 3488 */ 3489 if (workaround_erratum383) 3490 pmap_update_pde(pmap, va, pde, newpde); 3491 else 3492 pde_store(pde, newpde); 3493 3494 /* 3495 * Invalidate a stale recursive mapping of the page table page. 3496 */ 3497 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); 3498} 3499 3500/* 3501 * pmap_remove_pde: do the things to unmap a superpage in a process 3502 */ 3503static int 3504pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 3505 struct spglist *free, struct rwlock **lockp) 3506{ 3507 struct md_page *pvh; 3508 pd_entry_t oldpde; 3509 vm_offset_t eva, va; 3510 vm_page_t m, mpte; 3511 pt_entry_t PG_G, PG_A, PG_M, PG_RW; 3512 3513 PG_G = pmap_global_bit(pmap); 3514 PG_A = pmap_accessed_bit(pmap); 3515 PG_M = pmap_modified_bit(pmap); 3516 PG_RW = pmap_rw_bit(pmap); 3517 3518 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3519 KASSERT((sva & PDRMASK) == 0, 3520 ("pmap_remove_pde: sva is not 2mpage aligned")); 3521 oldpde = pte_load_clear(pdq); 3522 if (oldpde & PG_W) 3523 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 3524 3525 /* 3526 * Machines that don't support invlpg, also don't support 3527 * PG_G. 
3528 */ 3529 if (oldpde & PG_G) 3530 pmap_invalidate_page(kernel_pmap, sva); 3531 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); 3532 if (oldpde & PG_MANAGED) { 3533 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME); 3534 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 3535 pmap_pvh_free(pvh, pmap, sva); 3536 eva = sva + NBPDR; 3537 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3538 va < eva; va += PAGE_SIZE, m++) { 3539 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3540 vm_page_dirty(m); 3541 if (oldpde & PG_A) 3542 vm_page_aflag_set(m, PGA_REFERENCED); 3543 if (TAILQ_EMPTY(&m->md.pv_list) && 3544 TAILQ_EMPTY(&pvh->pv_list)) 3545 vm_page_aflag_clear(m, PGA_WRITEABLE); 3546 } 3547 } 3548 if (pmap == kernel_pmap) { 3549 pmap_remove_kernel_pde(pmap, pdq, sva); 3550 } else { 3551 mpte = pmap_lookup_pt_page(pmap, sva); 3552 if (mpte != NULL) { 3553 pmap_remove_pt_page(pmap, mpte); 3554 pmap_resident_count_dec(pmap, 1); 3555 KASSERT(mpte->wire_count == NPTEPG, 3556 ("pmap_remove_pde: pte page wire count error")); 3557 mpte->wire_count = 0; 3558 pmap_add_delayed_free_list(mpte, free, FALSE); 3559 atomic_subtract_int(&cnt.v_wire_count, 1); 3560 } 3561 } 3562 return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free)); 3563} 3564 3565/* 3566 * pmap_remove_pte: do the things to unmap a page in a process 3567 */ 3568static int 3569pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 3570 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp) 3571{ 3572 struct md_page *pvh; 3573 pt_entry_t oldpte, PG_A, PG_M, PG_RW; 3574 vm_page_t m; 3575 3576 PG_A = pmap_accessed_bit(pmap); 3577 PG_M = pmap_modified_bit(pmap); 3578 PG_RW = pmap_rw_bit(pmap); 3579 3580 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3581 oldpte = pte_load_clear(ptq); 3582 if (oldpte & PG_W) 3583 pmap->pm_stats.wired_count -= 1; 3584 pmap_resident_count_dec(pmap, 1); 3585 if (oldpte & PG_MANAGED) { 3586 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 3587 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3588 vm_page_dirty(m); 3589 if (oldpte & PG_A) 3590 vm_page_aflag_set(m, PGA_REFERENCED); 3591 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 3592 pmap_pvh_free(&m->md, pmap, va); 3593 if (TAILQ_EMPTY(&m->md.pv_list) && 3594 (m->flags & PG_FICTITIOUS) == 0) { 3595 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3596 if (TAILQ_EMPTY(&pvh->pv_list)) 3597 vm_page_aflag_clear(m, PGA_WRITEABLE); 3598 } 3599 } 3600 return (pmap_unuse_pt(pmap, va, ptepde, free)); 3601} 3602 3603/* 3604 * Remove a single page from a process address space 3605 */ 3606static void 3607pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 3608 struct spglist *free) 3609{ 3610 struct rwlock *lock; 3611 pt_entry_t *pte, PG_V; 3612 3613 PG_V = pmap_valid_bit(pmap); 3614 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3615 if ((*pde & PG_V) == 0) 3616 return; 3617 pte = pmap_pde_to_pte(pde, va); 3618 if ((*pte & PG_V) == 0) 3619 return; 3620 lock = NULL; 3621 pmap_remove_pte(pmap, pte, va, *pde, free, &lock); 3622 if (lock != NULL) 3623 rw_wunlock(lock); 3624 pmap_invalidate_page(pmap, va); 3625} 3626 3627/* 3628 * Remove the given range of addresses from the specified map. 3629 * 3630 * It is assumed that the start and end are properly 3631 * rounded to the page size. 
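 *
 * For example, tearing down a single page mapping would be (an
 * illustrative caller, not taken from this file):
 *
 *	pmap_remove(pmap, va, va + PAGE_SIZE);
 *
 * which is recognized by the single-page fast path below.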
3632 */ 3633void 3634pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3635{ 3636 struct rwlock *lock; 3637 vm_offset_t va, va_next; 3638 pml4_entry_t *pml4e; 3639 pdp_entry_t *pdpe; 3640 pd_entry_t ptpaddr, *pde; 3641 pt_entry_t *pte, PG_G, PG_V; 3642 struct spglist free; 3643 int anyvalid; 3644 3645 PG_G = pmap_global_bit(pmap); 3646 PG_V = pmap_valid_bit(pmap); 3647 3648 /* 3649 * Perform an unsynchronized read. This is, however, safe. 3650 */ 3651 if (pmap->pm_stats.resident_count == 0) 3652 return; 3653 3654 anyvalid = 0; 3655 SLIST_INIT(&free); 3656 3657 rw_rlock(&pvh_global_lock); 3658 PMAP_LOCK(pmap); 3659 3660 /* 3661 * special handling of removing one page. a very 3662 * common operation and easy to short circuit some 3663 * code. 3664 */ 3665 if (sva + PAGE_SIZE == eva) { 3666 pde = pmap_pde(pmap, sva); 3667 if (pde && (*pde & PG_PS) == 0) { 3668 pmap_remove_page(pmap, sva, pde, &free); 3669 goto out; 3670 } 3671 } 3672 3673 lock = NULL; 3674 for (; sva < eva; sva = va_next) { 3675 3676 if (pmap->pm_stats.resident_count == 0) 3677 break; 3678 3679 pml4e = pmap_pml4e(pmap, sva); 3680 if ((*pml4e & PG_V) == 0) { 3681 va_next = (sva + NBPML4) & ~PML4MASK; 3682 if (va_next < sva) 3683 va_next = eva; 3684 continue; 3685 } 3686 3687 pdpe = pmap_pml4e_to_pdpe(pml4e, sva); 3688 if ((*pdpe & PG_V) == 0) { 3689 va_next = (sva + NBPDP) & ~PDPMASK; 3690 if (va_next < sva) 3691 va_next = eva; 3692 continue; 3693 } 3694 3695 /* 3696 * Calculate index for next page table. 3697 */ 3698 va_next = (sva + NBPDR) & ~PDRMASK; 3699 if (va_next < sva) 3700 va_next = eva; 3701 3702 pde = pmap_pdpe_to_pde(pdpe, sva); 3703 ptpaddr = *pde; 3704 3705 /* 3706 * Weed out invalid mappings. 3707 */ 3708 if (ptpaddr == 0) 3709 continue; 3710 3711 /* 3712 * Check for large page. 3713 */ 3714 if ((ptpaddr & PG_PS) != 0) { 3715 /* 3716 * Are we removing the entire large page? If not, 3717 * demote the mapping and fall through. 3718 */ 3719 if (sva + NBPDR == va_next && eva >= va_next) { 3720 /* 3721 * The TLB entry for a PG_G mapping is 3722 * invalidated by pmap_remove_pde(). 3723 */ 3724 if ((ptpaddr & PG_G) == 0) 3725 anyvalid = 1; 3726 pmap_remove_pde(pmap, pde, sva, &free, &lock); 3727 continue; 3728 } else if (!pmap_demote_pde_locked(pmap, pde, sva, 3729 &lock)) { 3730 /* The large page mapping was destroyed. */ 3731 continue; 3732 } else 3733 ptpaddr = *pde; 3734 } 3735 3736 /* 3737 * Limit our scan to either the end of the va represented 3738 * by the current page table page, or to the end of the 3739 * range being removed. 3740 */ 3741 if (va_next > eva) 3742 va_next = eva; 3743 3744 va = va_next; 3745 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 3746 sva += PAGE_SIZE) { 3747 if (*pte == 0) { 3748 if (va != va_next) { 3749 pmap_invalidate_range(pmap, va, sva); 3750 va = va_next; 3751 } 3752 continue; 3753 } 3754 if ((*pte & PG_G) == 0) 3755 anyvalid = 1; 3756 else if (va == va_next) 3757 va = sva; 3758 if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free, 3759 &lock)) { 3760 sva += PAGE_SIZE; 3761 break; 3762 } 3763 } 3764 if (va != va_next) 3765 pmap_invalidate_range(pmap, va, sva); 3766 } 3767 if (lock != NULL) 3768 rw_wunlock(lock); 3769out: 3770 if (anyvalid) 3771 pmap_invalidate_all(pmap); 3772 rw_runlock(&pvh_global_lock); 3773 PMAP_UNLOCK(pmap); 3774 pmap_free_zero_pages(&free); 3775} 3776 3777/* 3778 * Routine: pmap_remove_all 3779 * Function: 3780 * Removes this physical page from 3781 * all physical maps in which it resides. 3782 * Reflects back modify bits to the pager. 
3783 * 3784 * Notes: 3785 * Original versions of this routine were very 3786 * inefficient because they iteratively called 3787 * pmap_remove (slow...) 3788 */ 3789 3790void 3791pmap_remove_all(vm_page_t m) 3792{ 3793 struct md_page *pvh; 3794 pv_entry_t pv; 3795 pmap_t pmap; 3796 pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW; 3797 pd_entry_t *pde; 3798 vm_offset_t va; 3799 struct spglist free; 3800 3801 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3802 ("pmap_remove_all: page %p is not managed", m)); 3803 SLIST_INIT(&free); 3804 rw_wlock(&pvh_global_lock); 3805 if ((m->flags & PG_FICTITIOUS) != 0) 3806 goto small_mappings; 3807 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3808 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3809 pmap = PV_PMAP(pv); 3810 PMAP_LOCK(pmap); 3811 va = pv->pv_va; 3812 pde = pmap_pde(pmap, va); 3813 (void)pmap_demote_pde(pmap, pde, va); 3814 PMAP_UNLOCK(pmap); 3815 } 3816small_mappings: 3817 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3818 pmap = PV_PMAP(pv); 3819 PMAP_LOCK(pmap); 3820 PG_A = pmap_accessed_bit(pmap); 3821 PG_M = pmap_modified_bit(pmap); 3822 PG_RW = pmap_rw_bit(pmap); 3823 pmap_resident_count_dec(pmap, 1); 3824 pde = pmap_pde(pmap, pv->pv_va); 3825 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3826 " a 2mpage in page %p's pv list", m)); 3827 pte = pmap_pde_to_pte(pde, pv->pv_va); 3828 tpte = pte_load_clear(pte); 3829 if (tpte & PG_W) 3830 pmap->pm_stats.wired_count--; 3831 if (tpte & PG_A) 3832 vm_page_aflag_set(m, PGA_REFERENCED); 3833 3834 /* 3835 * Update the vm_page_t clean and reference bits. 3836 */ 3837 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3838 vm_page_dirty(m); 3839 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free); 3840 pmap_invalidate_page(pmap, pv->pv_va); 3841 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3842 m->md.pv_gen++; 3843 free_pv_entry(pmap, pv); 3844 PMAP_UNLOCK(pmap); 3845 } 3846 vm_page_aflag_clear(m, PGA_WRITEABLE); 3847 rw_wunlock(&pvh_global_lock); 3848 pmap_free_zero_pages(&free); 3849} 3850 3851/* 3852 * pmap_protect_pde: do the things to protect a 2mpage in a process 3853 */ 3854static boolean_t 3855pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3856{ 3857 pd_entry_t newpde, oldpde; 3858 vm_offset_t eva, va; 3859 vm_page_t m; 3860 boolean_t anychanged; 3861 pt_entry_t PG_G, PG_M, PG_RW; 3862 3863 PG_G = pmap_global_bit(pmap); 3864 PG_M = pmap_modified_bit(pmap); 3865 PG_RW = pmap_rw_bit(pmap); 3866 3867 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3868 KASSERT((sva & PDRMASK) == 0, 3869 ("pmap_protect_pde: sva is not 2mpage aligned")); 3870 anychanged = FALSE; 3871retry: 3872 oldpde = newpde = *pde; 3873 if (oldpde & PG_MANAGED) { 3874 eva = sva + NBPDR; 3875 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3876 va < eva; va += PAGE_SIZE, m++) 3877 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3878 vm_page_dirty(m); 3879 } 3880 if ((prot & VM_PROT_WRITE) == 0) 3881 newpde &= ~(PG_RW | PG_M); 3882 if ((prot & VM_PROT_EXECUTE) == 0) 3883 newpde |= pg_nx; 3884 if (newpde != oldpde) { 3885 if (!atomic_cmpset_long(pde, oldpde, newpde)) 3886 goto retry; 3887 if (oldpde & PG_G) 3888 pmap_invalidate_page(pmap, sva); 3889 else 3890 anychanged = TRUE; 3891 } 3892 return (anychanged); 3893} 3894 3895/* 3896 * Set the physical protection on the 3897 * specified range of this map as requested. 
3898 */ 3899void 3900pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3901{ 3902 vm_offset_t va_next; 3903 pml4_entry_t *pml4e; 3904 pdp_entry_t *pdpe; 3905 pd_entry_t ptpaddr, *pde; 3906 pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V; 3907 boolean_t anychanged, pv_lists_locked; 3908 3909 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3910 if (prot == VM_PROT_NONE) { 3911 pmap_remove(pmap, sva, eva); 3912 return; 3913 } 3914 3915 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 3916 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 3917 return; 3918 3919 PG_G = pmap_global_bit(pmap); 3920 PG_M = pmap_modified_bit(pmap); 3921 PG_V = pmap_valid_bit(pmap); 3922 PG_RW = pmap_rw_bit(pmap); 3923 pv_lists_locked = FALSE; 3924resume: 3925 anychanged = FALSE; 3926 3927 PMAP_LOCK(pmap); 3928 for (; sva < eva; sva = va_next) { 3929 3930 pml4e = pmap_pml4e(pmap, sva); 3931 if ((*pml4e & PG_V) == 0) { 3932 va_next = (sva + NBPML4) & ~PML4MASK; 3933 if (va_next < sva) 3934 va_next = eva; 3935 continue; 3936 } 3937 3938 pdpe = pmap_pml4e_to_pdpe(pml4e, sva); 3939 if ((*pdpe & PG_V) == 0) { 3940 va_next = (sva + NBPDP) & ~PDPMASK; 3941 if (va_next < sva) 3942 va_next = eva; 3943 continue; 3944 } 3945 3946 va_next = (sva + NBPDR) & ~PDRMASK; 3947 if (va_next < sva) 3948 va_next = eva; 3949 3950 pde = pmap_pdpe_to_pde(pdpe, sva); 3951 ptpaddr = *pde; 3952 3953 /* 3954 * Weed out invalid mappings. 3955 */ 3956 if (ptpaddr == 0) 3957 continue; 3958 3959 /* 3960 * Check for large page. 3961 */ 3962 if ((ptpaddr & PG_PS) != 0) { 3963 /* 3964 * Are we protecting the entire large page? If not, 3965 * demote the mapping and fall through. 3966 */ 3967 if (sva + NBPDR == va_next && eva >= va_next) { 3968 /* 3969 * The TLB entry for a PG_G mapping is 3970 * invalidated by pmap_protect_pde(). 3971 */ 3972 if (pmap_protect_pde(pmap, pde, sva, prot)) 3973 anychanged = TRUE; 3974 continue; 3975 } else { 3976 if (!pv_lists_locked) { 3977 pv_lists_locked = TRUE; 3978 if (!rw_try_rlock(&pvh_global_lock)) { 3979 if (anychanged) 3980 pmap_invalidate_all( 3981 pmap); 3982 PMAP_UNLOCK(pmap); 3983 rw_rlock(&pvh_global_lock); 3984 goto resume; 3985 } 3986 } 3987 if (!pmap_demote_pde(pmap, pde, sva)) { 3988 /* 3989 * The large page mapping was 3990 * destroyed. 3991 */ 3992 continue; 3993 } 3994 } 3995 } 3996 3997 if (va_next > eva) 3998 va_next = eva; 3999 4000 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 4001 sva += PAGE_SIZE) { 4002 pt_entry_t obits, pbits; 4003 vm_page_t m; 4004 4005retry: 4006 obits = pbits = *pte; 4007 if ((pbits & PG_V) == 0) 4008 continue; 4009 4010 if ((prot & VM_PROT_WRITE) == 0) { 4011 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 4012 (PG_MANAGED | PG_M | PG_RW)) { 4013 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 4014 vm_page_dirty(m); 4015 } 4016 pbits &= ~(PG_RW | PG_M); 4017 } 4018 if ((prot & VM_PROT_EXECUTE) == 0) 4019 pbits |= pg_nx; 4020 4021 if (pbits != obits) { 4022 if (!atomic_cmpset_long(pte, obits, pbits)) 4023 goto retry; 4024 if (obits & PG_G) 4025 pmap_invalidate_page(pmap, sva); 4026 else 4027 anychanged = TRUE; 4028 } 4029 } 4030 } 4031 if (anychanged) 4032 pmap_invalidate_all(pmap); 4033 if (pv_lists_locked) 4034 rw_runlock(&pvh_global_lock); 4035 PMAP_UNLOCK(pmap); 4036} 4037 4038/* 4039 * Tries to promote the 512, contiguous 4KB page mappings that are within a 4040 * single page table page (PTP) to a single 2MB page mapping. 
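 * A successful promotion allows a single TLB entry to cover the
 * entire 2MB range.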
For promotion 4041 * to occur, two conditions must be met: (1) the 4KB page mappings must map 4042 * aligned, contiguous physical memory and (2) the 4KB page mappings must have 4043 * identical characteristics. 4044 */ 4045static void 4046pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, 4047 struct rwlock **lockp) 4048{ 4049 pd_entry_t newpde; 4050 pt_entry_t *firstpte, oldpte, pa, *pte; 4051 pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V; 4052 vm_page_t mpte; 4053 int PG_PTE_CACHE; 4054 4055 PG_A = pmap_accessed_bit(pmap); 4056 PG_G = pmap_global_bit(pmap); 4057 PG_M = pmap_modified_bit(pmap); 4058 PG_V = pmap_valid_bit(pmap); 4059 PG_RW = pmap_rw_bit(pmap); 4060 PG_PTE_CACHE = pmap_cache_mask(pmap, 0); 4061 4062 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4063 4064 /* 4065 * Examine the first PTE in the specified PTP. Abort if this PTE is 4066 * either invalid, unused, or does not map the first 4KB physical page 4067 * within a 2MB page. 4068 */ 4069 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); 4070setpde: 4071 newpde = *firstpte; 4072 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 4073 atomic_add_long(&pmap_pde_p_failures, 1); 4074 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" 4075 " in pmap %p", va, pmap); 4076 return; 4077 } 4078 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 4079 /* 4080 * When PG_M is already clear, PG_RW can be cleared without 4081 * a TLB invalidation. 4082 */ 4083 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW)) 4084 goto setpde; 4085 newpde &= ~PG_RW; 4086 } 4087 4088 /* 4089 * Examine each of the other PTEs in the specified PTP. Abort if this 4090 * PTE maps an unexpected 4KB physical page or does not have identical 4091 * characteristics to the first PTE. 4092 */ 4093 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 4094 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 4095setpte: 4096 oldpte = *pte; 4097 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 4098 atomic_add_long(&pmap_pde_p_failures, 1); 4099 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" 4100 " in pmap %p", va, pmap); 4101 return; 4102 } 4103 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 4104 /* 4105 * When PG_M is already clear, PG_RW can be cleared 4106 * without a TLB invalidation. 4107 */ 4108 if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW)) 4109 goto setpte; 4110 oldpte &= ~PG_RW; 4111 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx" 4112 " in pmap %p", (oldpte & PG_FRAME & PDRMASK) | 4113 (va & ~PDRMASK), pmap); 4114 } 4115 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 4116 atomic_add_long(&pmap_pde_p_failures, 1); 4117 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" 4118 " in pmap %p", va, pmap); 4119 return; 4120 } 4121 pa -= PAGE_SIZE; 4122 } 4123 4124 /* 4125 * Save the page table page in its current state until the PDE 4126 * mapping the superpage is demoted by pmap_demote_pde() or 4127 * destroyed by pmap_remove_pde(). 4128 */ 4129 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 4130 KASSERT(mpte >= vm_page_array && 4131 mpte < &vm_page_array[vm_page_array_size], 4132 ("pmap_promote_pde: page table page is out of range")); 4133 KASSERT(mpte->pindex == pmap_pde_pindex(va), 4134 ("pmap_promote_pde: page table page's pindex is wrong")); 4135 if (pmap_insert_pt_page(pmap, mpte)) { 4136 atomic_add_long(&pmap_pde_p_failures, 1); 4137 CTR2(KTR_PMAP, 4138 "pmap_promote_pde: failure for va %#lx in pmap %p", va, 4139 pmap); 4140 return; 4141 } 4142 4143 /* 4144 * Promote the pv entries. 
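 * For a managed mapping, pmap_pv_promote_pde() collapses the 4KB
 * pv entries into a single pv entry for the 2MB mapping.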
4145 */ 4146 if ((newpde & PG_MANAGED) != 0) 4147 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp); 4148 4149 /* 4150 * Propagate the PAT index to its proper position. 4151 */ 4152 newpde = pmap_swap_pat(pmap, newpde); 4153 4154 /* 4155 * Map the superpage. 4156 */ 4157 if (workaround_erratum383) 4158 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 4159 else 4160 pde_store(pde, PG_PS | newpde); 4161 4162 atomic_add_long(&pmap_pde_promotions, 1); 4163 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx" 4164 " in pmap %p", va, pmap); 4165} 4166 4167/* 4168 * Insert the given physical page (p) at 4169 * the specified virtual address (v) in the 4170 * target physical map with the protection requested. 4171 * 4172 * If specified, the page will be wired down, meaning 4173 * that the related pte can not be reclaimed. 4174 * 4175 * NB: This is the only routine which MAY NOT lazy-evaluate 4176 * or lose information. That is, this routine must actually 4177 * insert this page into the given map NOW. 4178 */ 4179int 4180pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 4181 u_int flags, int8_t psind __unused) 4182{ 4183 struct rwlock *lock; 4184 pd_entry_t *pde; 4185 pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V; 4186 pt_entry_t newpte, origpte; 4187 pv_entry_t pv; 4188 vm_paddr_t opa, pa; 4189 vm_page_t mpte, om; 4190 boolean_t nosleep; 4191 4192 PG_A = pmap_accessed_bit(pmap); 4193 PG_G = pmap_global_bit(pmap); 4194 PG_M = pmap_modified_bit(pmap); 4195 PG_V = pmap_valid_bit(pmap); 4196 PG_RW = pmap_rw_bit(pmap); 4197 4198 va = trunc_page(va); 4199 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 4200 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 4201 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", 4202 va)); 4203 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || 4204 va >= kmi.clean_eva, 4205 ("pmap_enter: managed mapping within the clean submap")); 4206 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 4207 VM_OBJECT_ASSERT_LOCKED(m->object); 4208 pa = VM_PAGE_TO_PHYS(m); 4209 newpte = (pt_entry_t)(pa | PG_A | PG_V); 4210 if ((flags & VM_PROT_WRITE) != 0) 4211 newpte |= PG_M; 4212 if ((prot & VM_PROT_WRITE) != 0) 4213 newpte |= PG_RW; 4214 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 4215 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 4216 if ((prot & VM_PROT_EXECUTE) == 0) 4217 newpte |= pg_nx; 4218 if ((flags & PMAP_ENTER_WIRED) != 0) 4219 newpte |= PG_W; 4220 if (va < VM_MAXUSER_ADDRESS) 4221 newpte |= PG_U; 4222 if (pmap == kernel_pmap) 4223 newpte |= PG_G; 4224 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, 0); 4225 4226 /* 4227 * Set modified bit gratuitously for writeable mappings if 4228 * the page is unmanaged. We do not want to take a fault 4229 * to do the dirty bit accounting for these mappings. 4230 */ 4231 if ((m->oflags & VPO_UNMANAGED) != 0) { 4232 if ((newpte & PG_RW) != 0) 4233 newpte |= PG_M; 4234 } 4235 4236 mpte = NULL; 4237 4238 lock = NULL; 4239 rw_rlock(&pvh_global_lock); 4240 PMAP_LOCK(pmap); 4241 4242 /* 4243 * In the case that a page table page is not 4244 * resident, we are creating it here. 
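 * Because _pmap_allocpte() may sleep and temporarily release the
 * pmap lock, the page directory lookup is retried after a successful
 * allocation.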
4245 */ 4246retry: 4247 pde = pmap_pde(pmap, va); 4248 if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 || 4249 pmap_demote_pde_locked(pmap, pde, va, &lock))) { 4250 pte = pmap_pde_to_pte(pde, va); 4251 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) { 4252 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 4253 mpte->wire_count++; 4254 } 4255 } else if (va < VM_MAXUSER_ADDRESS) { 4256 /* 4257 * Here if the pte page isn't mapped, or if it has been 4258 * deallocated. 4259 */ 4260 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; 4261 mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), 4262 nosleep ? NULL : &lock); 4263 if (mpte == NULL && nosleep) { 4264 if (lock != NULL) 4265 rw_wunlock(lock); 4266 rw_runlock(&pvh_global_lock); 4267 PMAP_UNLOCK(pmap); 4268 return (KERN_RESOURCE_SHORTAGE); 4269 } 4270 goto retry; 4271 } else 4272 panic("pmap_enter: invalid page directory va=%#lx", va); 4273 4274 origpte = *pte; 4275 4276 /* 4277 * Is the specified virtual address already mapped? 4278 */ 4279 if ((origpte & PG_V) != 0) { 4280 /* 4281 * Wiring change, just update stats. We don't worry about 4282 * wiring PT pages as they remain resident as long as there 4283 * are valid mappings in them. Hence, if a user page is wired, 4284 * the PT page will be also. 4285 */ 4286 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 4287 pmap->pm_stats.wired_count++; 4288 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 4289 pmap->pm_stats.wired_count--; 4290 4291 /* 4292 * Remove the extra PT page reference. 4293 */ 4294 if (mpte != NULL) { 4295 mpte->wire_count--; 4296 KASSERT(mpte->wire_count > 0, 4297 ("pmap_enter: missing reference to page table page," 4298 " va: 0x%lx", va)); 4299 } 4300 4301 /* 4302 * Has the physical page changed? 4303 */ 4304 opa = origpte & PG_FRAME; 4305 if (opa == pa) { 4306 /* 4307 * No, might be a protection or wiring change. 4308 */ 4309 if ((origpte & PG_MANAGED) != 0) { 4310 newpte |= PG_MANAGED; 4311 if ((newpte & PG_RW) != 0) 4312 vm_page_aflag_set(m, PGA_WRITEABLE); 4313 } 4314 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 4315 goto unchanged; 4316 goto validate; 4317 } 4318 } else { 4319 /* 4320 * Increment the counters. 4321 */ 4322 if ((newpte & PG_W) != 0) 4323 pmap->pm_stats.wired_count++; 4324 pmap_resident_count_inc(pmap, 1); 4325 } 4326 4327 /* 4328 * Enter on the PV list if part of our managed memory. 4329 */ 4330 if ((m->oflags & VPO_UNMANAGED) == 0) { 4331 newpte |= PG_MANAGED; 4332 pv = get_pv_entry(pmap, &lock); 4333 pv->pv_va = va; 4334 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); 4335 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 4336 m->md.pv_gen++; 4337 if ((newpte & PG_RW) != 0) 4338 vm_page_aflag_set(m, PGA_WRITEABLE); 4339 } 4340 4341 /* 4342 * Update the PTE. 
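 * If a prior mapping exists, the PTE is replaced atomically and the
 * old mapping's accessed and modified state is transferred to its
 * vm_page before any needed TLB invalidation is performed.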
4343 */ 4344 if ((origpte & PG_V) != 0) { 4345validate: 4346 origpte = pte_load_store(pte, newpte); 4347 opa = origpte & PG_FRAME; 4348 if (opa != pa) { 4349 if ((origpte & PG_MANAGED) != 0) { 4350 om = PHYS_TO_VM_PAGE(opa); 4351 if ((origpte & (PG_M | PG_RW)) == (PG_M | 4352 PG_RW)) 4353 vm_page_dirty(om); 4354 if ((origpte & PG_A) != 0) 4355 vm_page_aflag_set(om, PGA_REFERENCED); 4356 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); 4357 pmap_pvh_free(&om->md, pmap, va); 4358 if ((om->aflags & PGA_WRITEABLE) != 0 && 4359 TAILQ_EMPTY(&om->md.pv_list) && 4360 ((om->flags & PG_FICTITIOUS) != 0 || 4361 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 4362 vm_page_aflag_clear(om, PGA_WRITEABLE); 4363 } 4364 } else if ((newpte & PG_M) == 0 && (origpte & (PG_M | 4365 PG_RW)) == (PG_M | PG_RW)) { 4366 if ((origpte & PG_MANAGED) != 0) 4367 vm_page_dirty(m); 4368 4369 /* 4370 * Although the PTE may still have PG_RW set, TLB 4371 * invalidation may nonetheless be required because 4372 * the PTE no longer has PG_M set. 4373 */ 4374 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 4375 /* 4376 * This PTE change does not require TLB invalidation. 4377 */ 4378 goto unchanged; 4379 } 4380 if ((origpte & PG_A) != 0) 4381 pmap_invalidate_page(pmap, va); 4382 } else 4383 pte_store(pte, newpte); 4384 4385unchanged: 4386 4387 /* 4388 * If both the page table page and the reservation are fully 4389 * populated, then attempt promotion. 4390 */ 4391 if ((mpte == NULL || mpte->wire_count == NPTEPG) && 4392 pmap_ps_enabled(pmap) && 4393 (m->flags & PG_FICTITIOUS) == 0 && 4394 vm_reserv_level_iffullpop(m) == 0) 4395 pmap_promote_pde(pmap, pde, va, &lock); 4396 4397 if (lock != NULL) 4398 rw_wunlock(lock); 4399 rw_runlock(&pvh_global_lock); 4400 PMAP_UNLOCK(pmap); 4401 return (KERN_SUCCESS); 4402} 4403 4404/* 4405 * Tries to create a 2MB page mapping. Returns TRUE if successful and FALSE 4406 * otherwise. Fails if (1) a page table page cannot be allocated without 4407 * blocking, (2) a mapping already exists at the specified virtual address, or 4408 * (3) a pv entry cannot be allocated without reclaiming another pv entry. 4409 */ 4410static boolean_t 4411pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 4412 struct rwlock **lockp) 4413{ 4414 pd_entry_t *pde, newpde; 4415 pt_entry_t PG_V; 4416 vm_page_t mpde; 4417 struct spglist free; 4418 4419 PG_V = pmap_valid_bit(pmap); 4420 rw_assert(&pvh_global_lock, RA_LOCKED); 4421 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4422 4423 if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) { 4424 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4425 " in pmap %p", va, pmap); 4426 return (FALSE); 4427 } 4428 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde)); 4429 pde = &pde[pmap_pde_index(va)]; 4430 if ((*pde & PG_V) != 0) { 4431 KASSERT(mpde->wire_count > 1, 4432 ("pmap_enter_pde: mpde's wire count is too low")); 4433 mpde->wire_count--; 4434 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4435 " in pmap %p", va, pmap); 4436 return (FALSE); 4437 } 4438 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 4439 PG_PS | PG_V; 4440 if ((m->oflags & VPO_UNMANAGED) == 0) { 4441 newpde |= PG_MANAGED; 4442 4443 /* 4444 * Abort this mapping if its PV entry could not be created. 
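 * On failure, the newly allocated page directory page is unwired
 * and, if that releases its last reference, freed.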
4445 */ 4446 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m), 4447 lockp)) { 4448 SLIST_INIT(&free); 4449 if (pmap_unwire_ptp(pmap, va, mpde, &free)) { 4450 pmap_invalidate_page(pmap, va); 4451 pmap_free_zero_pages(&free); 4452 } 4453 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4454 " in pmap %p", va, pmap); 4455 return (FALSE); 4456 } 4457 } 4458 if ((prot & VM_PROT_EXECUTE) == 0) 4459 newpde |= pg_nx; 4460 if (va < VM_MAXUSER_ADDRESS) 4461 newpde |= PG_U; 4462 4463 /* 4464 * Increment counters. 4465 */ 4466 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE); 4467 4468 /* 4469 * Map the superpage. 4470 */ 4471 pde_store(pde, newpde); 4472 4473 atomic_add_long(&pmap_pde_mappings, 1); 4474 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" 4475 " in pmap %p", va, pmap); 4476 return (TRUE); 4477} 4478 4479/* 4480 * Maps a sequence of resident pages belonging to the same object. 4481 * The sequence begins with the given page m_start. This page is 4482 * mapped at the given virtual address start. Each subsequent page is 4483 * mapped at a virtual address that is offset from start by the same 4484 * amount as the page is offset from m_start within the object. The 4485 * last page in the sequence is the page with the largest offset from 4486 * m_start that can be mapped at a virtual address less than the given 4487 * virtual address end. Not every virtual page between start and end 4488 * is mapped; only those for which a resident page exists with the 4489 * corresponding offset from m_start are mapped. 4490 */ 4491void 4492pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4493 vm_page_t m_start, vm_prot_t prot) 4494{ 4495 struct rwlock *lock; 4496 vm_offset_t va; 4497 vm_page_t m, mpte; 4498 vm_pindex_t diff, psize; 4499 4500 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4501 4502 psize = atop(end - start); 4503 mpte = NULL; 4504 m = m_start; 4505 lock = NULL; 4506 rw_rlock(&pvh_global_lock); 4507 PMAP_LOCK(pmap); 4508 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4509 va = start + ptoa(diff); 4510 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 4511 m->psind == 1 && pmap_ps_enabled(pmap) && 4512 pmap_enter_pde(pmap, va, m, prot, &lock)) 4513 m = &m[NBPDR / PAGE_SIZE - 1]; 4514 else 4515 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 4516 mpte, &lock); 4517 m = TAILQ_NEXT(m, listq); 4518 } 4519 if (lock != NULL) 4520 rw_wunlock(lock); 4521 rw_runlock(&pvh_global_lock); 4522 PMAP_UNLOCK(pmap); 4523} 4524 4525/* 4526 * this code makes some *MAJOR* assumptions: 4527 * 1. Current pmap & pmap exists. 4528 * 2. Not wired. 4529 * 3. Read access. 4530 * 4. No page table pages. 4531 * but is *MUCH* faster than pmap_enter... 
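 *
 * (pmap_enter_object() takes the same fast path through
 * pmap_enter_quick_locked() when it maps resident pages.)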
4532 */ 4533 4534void 4535pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4536{ 4537 struct rwlock *lock; 4538 4539 lock = NULL; 4540 rw_rlock(&pvh_global_lock); 4541 PMAP_LOCK(pmap); 4542 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); 4543 if (lock != NULL) 4544 rw_wunlock(lock); 4545 rw_runlock(&pvh_global_lock); 4546 PMAP_UNLOCK(pmap); 4547} 4548 4549static vm_page_t 4550pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4551 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) 4552{ 4553 struct spglist free; 4554 pt_entry_t *pte, PG_V; 4555 vm_paddr_t pa; 4556 4557 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 4558 (m->oflags & VPO_UNMANAGED) != 0, 4559 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 4560 PG_V = pmap_valid_bit(pmap); 4561 rw_assert(&pvh_global_lock, RA_LOCKED); 4562 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4563 4564 /* 4565 * In the case that a page table page is not 4566 * resident, we are creating it here. 4567 */ 4568 if (va < VM_MAXUSER_ADDRESS) { 4569 vm_pindex_t ptepindex; 4570 pd_entry_t *ptepa; 4571 4572 /* 4573 * Calculate pagetable page index 4574 */ 4575 ptepindex = pmap_pde_pindex(va); 4576 if (mpte && (mpte->pindex == ptepindex)) { 4577 mpte->wire_count++; 4578 } else { 4579 /* 4580 * Get the page directory entry 4581 */ 4582 ptepa = pmap_pde(pmap, va); 4583 4584 /* 4585 * If the page table page is mapped, we just increment 4586 * the hold count, and activate it. Otherwise, we 4587 * attempt to allocate a page table page. If this 4588 * attempt fails, we don't retry. Instead, we give up. 4589 */ 4590 if (ptepa && (*ptepa & PG_V) != 0) { 4591 if (*ptepa & PG_PS) 4592 return (NULL); 4593 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME); 4594 mpte->wire_count++; 4595 } else { 4596 /* 4597 * Pass NULL instead of the PV list lock 4598 * pointer, because we don't intend to sleep. 4599 */ 4600 mpte = _pmap_allocpte(pmap, ptepindex, NULL); 4601 if (mpte == NULL) 4602 return (mpte); 4603 } 4604 } 4605 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); 4606 pte = &pte[pmap_pte_index(va)]; 4607 } else { 4608 mpte = NULL; 4609 pte = vtopte(va); 4610 } 4611 if (*pte) { 4612 if (mpte != NULL) { 4613 mpte->wire_count--; 4614 mpte = NULL; 4615 } 4616 return (mpte); 4617 } 4618 4619 /* 4620 * Enter on the PV list if part of our managed memory. 4621 */ 4622 if ((m->oflags & VPO_UNMANAGED) == 0 && 4623 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { 4624 if (mpte != NULL) { 4625 SLIST_INIT(&free); 4626 if (pmap_unwire_ptp(pmap, va, mpte, &free)) { 4627 pmap_invalidate_page(pmap, va); 4628 pmap_free_zero_pages(&free); 4629 } 4630 mpte = NULL; 4631 } 4632 return (mpte); 4633 } 4634 4635 /* 4636 * Increment counters 4637 */ 4638 pmap_resident_count_inc(pmap, 1); 4639 4640 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0); 4641 if ((prot & VM_PROT_EXECUTE) == 0) 4642 pa |= pg_nx; 4643 4644 /* 4645 * Now validate mapping with RO protection 4646 */ 4647 if ((m->oflags & VPO_UNMANAGED) != 0) 4648 pte_store(pte, pa | PG_V | PG_U); 4649 else 4650 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 4651 return (mpte); 4652} 4653 4654/* 4655 * Make a temporary mapping for a physical address. This is only intended 4656 * to be used for panic dumps. 
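 *
 * Illustrative use only (the actual dump code may differ): to copy
 * out a physical page "pa",
 *
 *	va = pmap_kenter_temporary(pa, 0);
 *	(copy PAGE_SIZE bytes starting at va)
 *
 * Successive values of "i" select consecutive pages within the
 * crashdumpmap window.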
4657 */ 4658void * 4659pmap_kenter_temporary(vm_paddr_t pa, int i) 4660{ 4661 vm_offset_t va; 4662 4663 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4664 pmap_kenter(va, pa); 4665 invlpg(va); 4666 return ((void *)crashdumpmap); 4667} 4668 4669/* 4670 * This code maps large physical mmap regions into the 4671 * processor address space. Note that some shortcuts 4672 * are taken, but the code works. 4673 */ 4674void 4675pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 4676 vm_pindex_t pindex, vm_size_t size) 4677{ 4678 pd_entry_t *pde; 4679 pt_entry_t PG_A, PG_M, PG_RW, PG_V; 4680 vm_paddr_t pa, ptepa; 4681 vm_page_t p, pdpg; 4682 int pat_mode; 4683 4684 PG_A = pmap_accessed_bit(pmap); 4685 PG_M = pmap_modified_bit(pmap); 4686 PG_V = pmap_valid_bit(pmap); 4687 PG_RW = pmap_rw_bit(pmap); 4688 4689 VM_OBJECT_ASSERT_WLOCKED(object); 4690 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4691 ("pmap_object_init_pt: non-device object")); 4692 if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4693 if (!pmap_ps_enabled(pmap)) 4694 return; 4695 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4696 return; 4697 p = vm_page_lookup(object, pindex); 4698 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4699 ("pmap_object_init_pt: invalid page %p", p)); 4700 pat_mode = p->md.pat_mode; 4701 4702 /* 4703 * Abort the mapping if the first page is not physically 4704 * aligned to a 2MB page boundary. 4705 */ 4706 ptepa = VM_PAGE_TO_PHYS(p); 4707 if (ptepa & (NBPDR - 1)) 4708 return; 4709 4710 /* 4711 * Skip the first page. Abort the mapping if the rest of 4712 * the pages are not physically contiguous or have differing 4713 * memory attributes. 4714 */ 4715 p = TAILQ_NEXT(p, listq); 4716 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4717 pa += PAGE_SIZE) { 4718 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4719 ("pmap_object_init_pt: invalid page %p", p)); 4720 if (pa != VM_PAGE_TO_PHYS(p) || 4721 pat_mode != p->md.pat_mode) 4722 return; 4723 p = TAILQ_NEXT(p, listq); 4724 } 4725 4726 /* 4727 * Map using 2MB pages. Since "ptepa" is 2M aligned and 4728 * "size" is a multiple of 2M, adding the PAT setting to "pa" 4729 * will not affect the termination of this loop. 4730 */ 4731 PMAP_LOCK(pmap); 4732 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4733 pa < ptepa + size; pa += NBPDR) { 4734 pdpg = pmap_allocpde(pmap, addr, NULL); 4735 if (pdpg == NULL) { 4736 /* 4737 * The creation of mappings below is only an 4738 * optimization. If a page directory page 4739 * cannot be allocated without blocking, 4740 * continue on to the next mapping rather than 4741 * blocking. 4742 */ 4743 addr += NBPDR; 4744 continue; 4745 } 4746 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); 4747 pde = &pde[pmap_pde_index(addr)]; 4748 if ((*pde & PG_V) == 0) { 4749 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4750 PG_U | PG_RW | PG_V); 4751 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE); 4752 atomic_add_long(&pmap_pde_mappings, 1); 4753 } else { 4754 /* Continue on if the PDE is already valid. */ 4755 pdpg->wire_count--; 4756 KASSERT(pdpg->wire_count > 0, 4757 ("pmap_object_init_pt: missing reference " 4758 "to page directory page, va: 0x%lx", addr)); 4759 } 4760 addr += NBPDR; 4761 } 4762 PMAP_UNLOCK(pmap); 4763 } 4764} 4765 4766/* 4767 * Clear the wired attribute from the mappings for the specified range of 4768 * addresses in the given pmap. Every valid mapping within that range 4769 * must have the wired attribute set. 
In contrast, invalid mappings 4770 * cannot have the wired attribute set, so they are ignored. 4771 * 4772 * The wired attribute of the page table entry is not a hardware feature, 4773 * so there is no need to invalidate any TLB entries. 4774 */ 4775void 4776pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4777{ 4778 vm_offset_t va_next; 4779 pml4_entry_t *pml4e; 4780 pdp_entry_t *pdpe; 4781 pd_entry_t *pde; 4782 pt_entry_t *pte, PG_V; 4783 boolean_t pv_lists_locked; 4784 4785 PG_V = pmap_valid_bit(pmap); 4786 pv_lists_locked = FALSE; 4787resume: 4788 PMAP_LOCK(pmap); 4789 for (; sva < eva; sva = va_next) { 4790 pml4e = pmap_pml4e(pmap, sva); 4791 if ((*pml4e & PG_V) == 0) { 4792 va_next = (sva + NBPML4) & ~PML4MASK; 4793 if (va_next < sva) 4794 va_next = eva; 4795 continue; 4796 } 4797 pdpe = pmap_pml4e_to_pdpe(pml4e, sva); 4798 if ((*pdpe & PG_V) == 0) { 4799 va_next = (sva + NBPDP) & ~PDPMASK; 4800 if (va_next < sva) 4801 va_next = eva; 4802 continue; 4803 } 4804 va_next = (sva + NBPDR) & ~PDRMASK; 4805 if (va_next < sva) 4806 va_next = eva; 4807 pde = pmap_pdpe_to_pde(pdpe, sva); 4808 if ((*pde & PG_V) == 0) 4809 continue; 4810 if ((*pde & PG_PS) != 0) { 4811 if ((*pde & PG_W) == 0) 4812 panic("pmap_unwire: pde %#jx is missing PG_W", 4813 (uintmax_t)*pde); 4814 4815 /* 4816 * Are we unwiring the entire large page? If not, 4817 * demote the mapping and fall through. 4818 */ 4819 if (sva + NBPDR == va_next && eva >= va_next) { 4820 atomic_clear_long(pde, PG_W); 4821 pmap->pm_stats.wired_count -= NBPDR / 4822 PAGE_SIZE; 4823 continue; 4824 } else { 4825 if (!pv_lists_locked) { 4826 pv_lists_locked = TRUE; 4827 if (!rw_try_rlock(&pvh_global_lock)) { 4828 PMAP_UNLOCK(pmap); 4829 rw_rlock(&pvh_global_lock); 4830 /* Repeat sva. */ 4831 goto resume; 4832 } 4833 } 4834 if (!pmap_demote_pde(pmap, pde, sva)) 4835 panic("pmap_unwire: demotion failed"); 4836 } 4837 } 4838 if (va_next > eva) 4839 va_next = eva; 4840 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 4841 sva += PAGE_SIZE) { 4842 if ((*pte & PG_V) == 0) 4843 continue; 4844 if ((*pte & PG_W) == 0) 4845 panic("pmap_unwire: pte %#jx is missing PG_W", 4846 (uintmax_t)*pte); 4847 4848 /* 4849 * PG_W must be cleared atomically. Although the pmap 4850 * lock synchronizes access to PG_W, another processor 4851 * could be setting PG_M and/or PG_A concurrently. 4852 */ 4853 atomic_clear_long(pte, PG_W); 4854 pmap->pm_stats.wired_count--; 4855 } 4856 } 4857 if (pv_lists_locked) 4858 rw_runlock(&pvh_global_lock); 4859 PMAP_UNLOCK(pmap); 4860} 4861 4862/* 4863 * Copy the range specified by src_addr/len 4864 * from the source map to the range dst_addr/len 4865 * in the destination map. 4866 * 4867 * This routine is only advisory and need not do anything. 4868 */ 4869 4870void 4871pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 4872 vm_offset_t src_addr) 4873{ 4874 struct rwlock *lock; 4875 struct spglist free; 4876 vm_offset_t addr; 4877 vm_offset_t end_addr = src_addr + len; 4878 vm_offset_t va_next; 4879 pt_entry_t PG_A, PG_M, PG_V; 4880 4881 if (dst_addr != src_addr) 4882 return; 4883 4884 if (dst_pmap->pm_type != src_pmap->pm_type) 4885 return; 4886 4887 /* 4888 * EPT page table entries that require emulation of A/D bits are 4889 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although 4890 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit 4891 * (aka EPT_PG_EXECUTE) could still be set. 
Since some EPT 4892 * implementations flag an EPT misconfiguration for exec-only 4893 * mappings we skip this function entirely for emulated pmaps. 4894 */ 4895 if (pmap_emulate_ad_bits(dst_pmap)) 4896 return; 4897 4898 lock = NULL; 4899 rw_rlock(&pvh_global_lock); 4900 if (dst_pmap < src_pmap) { 4901 PMAP_LOCK(dst_pmap); 4902 PMAP_LOCK(src_pmap); 4903 } else { 4904 PMAP_LOCK(src_pmap); 4905 PMAP_LOCK(dst_pmap); 4906 } 4907 4908 PG_A = pmap_accessed_bit(dst_pmap); 4909 PG_M = pmap_modified_bit(dst_pmap); 4910 PG_V = pmap_valid_bit(dst_pmap); 4911 4912 for (addr = src_addr; addr < end_addr; addr = va_next) { 4913 pt_entry_t *src_pte, *dst_pte; 4914 vm_page_t dstmpde, dstmpte, srcmpte; 4915 pml4_entry_t *pml4e; 4916 pdp_entry_t *pdpe; 4917 pd_entry_t srcptepaddr, *pde; 4918 4919 KASSERT(addr < UPT_MIN_ADDRESS, 4920 ("pmap_copy: invalid to pmap_copy page tables")); 4921 4922 pml4e = pmap_pml4e(src_pmap, addr); 4923 if ((*pml4e & PG_V) == 0) { 4924 va_next = (addr + NBPML4) & ~PML4MASK; 4925 if (va_next < addr) 4926 va_next = end_addr; 4927 continue; 4928 } 4929 4930 pdpe = pmap_pml4e_to_pdpe(pml4e, addr); 4931 if ((*pdpe & PG_V) == 0) { 4932 va_next = (addr + NBPDP) & ~PDPMASK; 4933 if (va_next < addr) 4934 va_next = end_addr; 4935 continue; 4936 } 4937 4938 va_next = (addr + NBPDR) & ~PDRMASK; 4939 if (va_next < addr) 4940 va_next = end_addr; 4941 4942 pde = pmap_pdpe_to_pde(pdpe, addr); 4943 srcptepaddr = *pde; 4944 if (srcptepaddr == 0) 4945 continue; 4946 4947 if (srcptepaddr & PG_PS) { 4948 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4949 continue; 4950 dstmpde = pmap_allocpde(dst_pmap, addr, NULL); 4951 if (dstmpde == NULL) 4952 break; 4953 pde = (pd_entry_t *) 4954 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde)); 4955 pde = &pde[pmap_pde_index(addr)]; 4956 if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 || 4957 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr & 4958 PG_PS_FRAME, &lock))) { 4959 *pde = srcptepaddr & ~PG_W; 4960 pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE); 4961 } else 4962 dstmpde->wire_count--; 4963 continue; 4964 } 4965 4966 srcptepaddr &= PG_FRAME; 4967 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); 4968 KASSERT(srcmpte->wire_count > 0, 4969 ("pmap_copy: source page table page is unused")); 4970 4971 if (va_next > end_addr) 4972 va_next = end_addr; 4973 4974 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr); 4975 src_pte = &src_pte[pmap_pte_index(addr)]; 4976 dstmpte = NULL; 4977 while (addr < va_next) { 4978 pt_entry_t ptetemp; 4979 ptetemp = *src_pte; 4980 /* 4981 * we only virtual copy managed pages 4982 */ 4983 if ((ptetemp & PG_MANAGED) != 0) { 4984 if (dstmpte != NULL && 4985 dstmpte->pindex == pmap_pde_pindex(addr)) 4986 dstmpte->wire_count++; 4987 else if ((dstmpte = pmap_allocpte(dst_pmap, 4988 addr, NULL)) == NULL) 4989 goto out; 4990 dst_pte = (pt_entry_t *) 4991 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); 4992 dst_pte = &dst_pte[pmap_pte_index(addr)]; 4993 if (*dst_pte == 0 && 4994 pmap_try_insert_pv_entry(dst_pmap, addr, 4995 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), 4996 &lock)) { 4997 /* 4998 * Clear the wired, modified, and 4999 * accessed (referenced) bits 5000 * during the copy. 
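 * The destination mapping therefore starts out clean, unreferenced,
 * and unwired; the accessed and modified bits will be set again on
 * first use.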
5001 */ 5002 *dst_pte = ptetemp & ~(PG_W | PG_M | 5003 PG_A); 5004 pmap_resident_count_inc(dst_pmap, 1); 5005 } else { 5006 SLIST_INIT(&free); 5007 if (pmap_unwire_ptp(dst_pmap, addr, 5008 dstmpte, &free)) { 5009 pmap_invalidate_page(dst_pmap, 5010 addr); 5011 pmap_free_zero_pages(&free); 5012 } 5013 goto out; 5014 } 5015 if (dstmpte->wire_count >= srcmpte->wire_count) 5016 break; 5017 } 5018 addr += PAGE_SIZE; 5019 src_pte++; 5020 } 5021 } 5022out: 5023 if (lock != NULL) 5024 rw_wunlock(lock); 5025 rw_runlock(&pvh_global_lock); 5026 PMAP_UNLOCK(src_pmap); 5027 PMAP_UNLOCK(dst_pmap); 5028} 5029 5030/* 5031 * pmap_zero_page zeros the specified hardware page by mapping 5032 * the page into KVM and using bzero to clear its contents. 5033 */ 5034void 5035pmap_zero_page(vm_page_t m) 5036{ 5037 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5038 5039 pagezero((void *)va); 5040} 5041 5042/* 5043 * pmap_zero_page_area zeros the specified hardware page by mapping 5044 * the page into KVM and using bzero to clear its contents. 5045 * 5046 * off and size may not cover an area beyond a single hardware page. 5047 */ 5048void 5049pmap_zero_page_area(vm_page_t m, int off, int size) 5050{ 5051 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5052 5053 if (off == 0 && size == PAGE_SIZE) 5054 pagezero((void *)va); 5055 else 5056 bzero((char *)va + off, size); 5057} 5058 5059/* 5060 * pmap_zero_page_idle zeros the specified hardware page by mapping 5061 * the page into KVM and using bzero to clear its contents. This 5062 * is intended to be called from the vm_pagezero process only and 5063 * outside of Giant. 5064 */ 5065void 5066pmap_zero_page_idle(vm_page_t m) 5067{ 5068 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5069 5070 pagezero((void *)va); 5071} 5072 5073/* 5074 * pmap_copy_page copies the specified (machine independent) 5075 * page by mapping the page into virtual memory and using 5076 * bcopy to copy the page, one machine dependent page at a 5077 * time. 5078 */ 5079void 5080pmap_copy_page(vm_page_t msrc, vm_page_t mdst) 5081{ 5082 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); 5083 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); 5084 5085 pagecopy((void *)src, (void *)dst); 5086} 5087 5088int unmapped_buf_allowed = 1; 5089 5090void 5091pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5092 vm_offset_t b_offset, int xfersize) 5093{ 5094 void *a_cp, *b_cp; 5095 vm_page_t m_a, m_b; 5096 vm_paddr_t p_a, p_b; 5097 pt_entry_t *pte; 5098 vm_offset_t a_pg_offset, b_pg_offset; 5099 int cnt; 5100 boolean_t pinned; 5101 5102 /* 5103 * NB: The sequence of updating a page table followed by accesses 5104 * to the corresponding pages used in the !DMAP case is subject to 5105 * the situation described in the "AMD64 Architecture Programmer's 5106 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special 5107 * Coherency Considerations". Therefore, issuing the INVLPG right 5108 * after modifying the PTE bits is crucial. 
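 * This applies to the temporary cpage_a/cpage_b mappings created
 * below when a page lies outside the direct map.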
5109 */ 5110 pinned = FALSE; 5111 while (xfersize > 0) { 5112 a_pg_offset = a_offset & PAGE_MASK; 5113 m_a = ma[a_offset >> PAGE_SHIFT]; 5114 p_a = m_a->phys_addr; 5115 b_pg_offset = b_offset & PAGE_MASK; 5116 m_b = mb[b_offset >> PAGE_SHIFT]; 5117 p_b = m_b->phys_addr; 5118 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5119 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5120 if (__predict_false(p_a < DMAP_MIN_ADDRESS || 5121 p_a > DMAP_MIN_ADDRESS + dmaplimit)) { 5122 mtx_lock(&cpage_lock); 5123 sched_pin(); 5124 pinned = TRUE; 5125 pte = vtopte(cpage_a); 5126 *pte = p_a | X86_PG_A | X86_PG_V | 5127 pmap_cache_bits(kernel_pmap, m_a->md.pat_mode, 0); 5128 invlpg(cpage_a); 5129 a_cp = (char *)cpage_a + a_pg_offset; 5130 } else { 5131 a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset; 5132 } 5133 if (__predict_false(p_b < DMAP_MIN_ADDRESS || 5134 p_b > DMAP_MIN_ADDRESS + dmaplimit)) { 5135 if (!pinned) { 5136 mtx_lock(&cpage_lock); 5137 sched_pin(); 5138 pinned = TRUE; 5139 } 5140 pte = vtopte(cpage_b); 5141 *pte = p_b | X86_PG_A | X86_PG_M | X86_PG_RW | 5142 X86_PG_V | pmap_cache_bits(kernel_pmap, 5143 m_b->md.pat_mode, 0); 5144 invlpg(cpage_b); 5145 b_cp = (char *)cpage_b + b_pg_offset; 5146 } else { 5147 b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset; 5148 } 5149 bcopy(a_cp, b_cp, cnt); 5150 if (__predict_false(pinned)) { 5151 sched_unpin(); 5152 mtx_unlock(&cpage_lock); 5153 pinned = FALSE; 5154 } 5155 a_offset += cnt; 5156 b_offset += cnt; 5157 xfersize -= cnt; 5158 } 5159} 5160 5161/* 5162 * Returns true if the pmap's pv is one of the first 5163 * 16 pvs linked to from this page. This count may 5164 * be changed upwards or downwards in the future; it 5165 * is only necessary that true be returned for a small 5166 * subset of pmaps for proper page aging. 5167 */ 5168boolean_t 5169pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5170{ 5171 struct md_page *pvh; 5172 struct rwlock *lock; 5173 pv_entry_t pv; 5174 int loops = 0; 5175 boolean_t rv; 5176 5177 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5178 ("pmap_page_exists_quick: page %p is not managed", m)); 5179 rv = FALSE; 5180 rw_rlock(&pvh_global_lock); 5181 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5182 rw_rlock(lock); 5183 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5184 if (PV_PMAP(pv) == pmap) { 5185 rv = TRUE; 5186 break; 5187 } 5188 loops++; 5189 if (loops >= 16) 5190 break; 5191 } 5192 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5193 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5194 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5195 if (PV_PMAP(pv) == pmap) { 5196 rv = TRUE; 5197 break; 5198 } 5199 loops++; 5200 if (loops >= 16) 5201 break; 5202 } 5203 } 5204 rw_runlock(lock); 5205 rw_runlock(&pvh_global_lock); 5206 return (rv); 5207} 5208 5209/* 5210 * pmap_page_wired_mappings: 5211 * 5212 * Return the number of managed mappings to the given physical page 5213 * that are wired. 
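 *	Both 4KB and 2MB mappings of the page are counted.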
5214 */ 5215int 5216pmap_page_wired_mappings(vm_page_t m) 5217{ 5218 struct rwlock *lock; 5219 struct md_page *pvh; 5220 pmap_t pmap; 5221 pt_entry_t *pte; 5222 pv_entry_t pv; 5223 int count, md_gen, pvh_gen; 5224 5225 if ((m->oflags & VPO_UNMANAGED) != 0) 5226 return (0); 5227 rw_rlock(&pvh_global_lock); 5228 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5229 rw_rlock(lock); 5230restart: 5231 count = 0; 5232 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5233 pmap = PV_PMAP(pv); 5234 if (!PMAP_TRYLOCK(pmap)) { 5235 md_gen = m->md.pv_gen; 5236 rw_runlock(lock); 5237 PMAP_LOCK(pmap); 5238 rw_rlock(lock); 5239 if (md_gen != m->md.pv_gen) { 5240 PMAP_UNLOCK(pmap); 5241 goto restart; 5242 } 5243 } 5244 pte = pmap_pte(pmap, pv->pv_va); 5245 if ((*pte & PG_W) != 0) 5246 count++; 5247 PMAP_UNLOCK(pmap); 5248 } 5249 if ((m->flags & PG_FICTITIOUS) == 0) { 5250 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5251 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5252 pmap = PV_PMAP(pv); 5253 if (!PMAP_TRYLOCK(pmap)) { 5254 md_gen = m->md.pv_gen; 5255 pvh_gen = pvh->pv_gen; 5256 rw_runlock(lock); 5257 PMAP_LOCK(pmap); 5258 rw_rlock(lock); 5259 if (md_gen != m->md.pv_gen || 5260 pvh_gen != pvh->pv_gen) { 5261 PMAP_UNLOCK(pmap); 5262 goto restart; 5263 } 5264 } 5265 pte = pmap_pde(pmap, pv->pv_va); 5266 if ((*pte & PG_W) != 0) 5267 count++; 5268 PMAP_UNLOCK(pmap); 5269 } 5270 } 5271 rw_runlock(lock); 5272 rw_runlock(&pvh_global_lock); 5273 return (count); 5274} 5275 5276/* 5277 * Returns TRUE if the given page is mapped individually or as part of 5278 * a 2mpage. Otherwise, returns FALSE. 5279 */ 5280boolean_t 5281pmap_page_is_mapped(vm_page_t m) 5282{ 5283 struct rwlock *lock; 5284 boolean_t rv; 5285 5286 if ((m->oflags & VPO_UNMANAGED) != 0) 5287 return (FALSE); 5288 rw_rlock(&pvh_global_lock); 5289 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5290 rw_rlock(lock); 5291 rv = !TAILQ_EMPTY(&m->md.pv_list) || 5292 ((m->flags & PG_FICTITIOUS) == 0 && 5293 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5294 rw_runlock(lock); 5295 rw_runlock(&pvh_global_lock); 5296 return (rv); 5297} 5298 5299/* 5300 * Destroy all managed, non-wired mappings in the given user-space 5301 * pmap. This pmap cannot be active on any processor besides the 5302 * caller. 5303 * 5304 * This function cannot be applied to the kernel pmap. Moreover, it 5305 * is not intended for general use. It is only to be used during 5306 * process termination. Consequently, it can be implemented in ways 5307 * that make it faster than pmap_remove(). First, it can more quickly 5308 * destroy mappings by iterating over the pmap's collection of PV 5309 * entries, rather than searching the page table. Second, it doesn't 5310 * have to test and clear the page table entries atomically, because 5311 * no processor is currently accessing the user address space. In 5312 * particular, a page table entry's dirty bit won't change state once 5313 * this function starts. 5314 */ 5315void 5316pmap_remove_pages(pmap_t pmap) 5317{ 5318 pd_entry_t ptepde; 5319 pt_entry_t *pte, tpte; 5320 pt_entry_t PG_M, PG_RW, PG_V; 5321 struct spglist free; 5322 vm_page_t m, mpte, mt; 5323 pv_entry_t pv; 5324 struct md_page *pvh; 5325 struct pv_chunk *pc, *npc; 5326 struct rwlock *lock; 5327 int64_t bit; 5328 uint64_t inuse, bitmask; 5329 int allfree, field, freed, idx; 5330 boolean_t superpage; 5331 vm_paddr_t pa; 5332 5333 /* 5334 * Assert that the given pmap is only active on the current 5335 * CPU. Unfortunately, we cannot block another CPU from 5336 * activating the pmap while this function is executing. 
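 * The checks below are therefore only a best-effort sanity test;
 * the expensive cpuset comparison is compiled under INVARIANTS only.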
5337 */ 5338 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap)); 5339#ifdef INVARIANTS 5340 { 5341 cpuset_t other_cpus; 5342 5343 other_cpus = all_cpus; 5344 critical_enter(); 5345 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 5346 CPU_AND(&other_cpus, &pmap->pm_active); 5347 critical_exit(); 5348 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap)); 5349 } 5350#endif 5351 5352 lock = NULL; 5353 PG_M = pmap_modified_bit(pmap); 5354 PG_V = pmap_valid_bit(pmap); 5355 PG_RW = pmap_rw_bit(pmap); 5356 5357 SLIST_INIT(&free); 5358 rw_rlock(&pvh_global_lock); 5359 PMAP_LOCK(pmap); 5360 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 5361 allfree = 1; 5362 freed = 0; 5363 for (field = 0; field < _NPCM; field++) { 5364 inuse = ~pc->pc_map[field] & pc_freemask[field]; 5365 while (inuse != 0) { 5366 bit = bsfq(inuse); 5367 bitmask = 1UL << bit; 5368 idx = field * 64 + bit; 5369 pv = &pc->pc_pventry[idx]; 5370 inuse &= ~bitmask; 5371 5372 pte = pmap_pdpe(pmap, pv->pv_va); 5373 ptepde = *pte; 5374 pte = pmap_pdpe_to_pde(pte, pv->pv_va); 5375 tpte = *pte; 5376 if ((tpte & (PG_PS | PG_V)) == PG_V) { 5377 superpage = FALSE; 5378 ptepde = tpte; 5379 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & 5380 PG_FRAME); 5381 pte = &pte[pmap_pte_index(pv->pv_va)]; 5382 tpte = *pte; 5383 } else { 5384 /* 5385 * Keep track whether 'tpte' is a 5386 * superpage explicitly instead of 5387 * relying on PG_PS being set. 5388 * 5389 * This is because PG_PS is numerically 5390 * identical to PG_PTE_PAT and thus a 5391 * regular page could be mistaken for 5392 * a superpage. 5393 */ 5394 superpage = TRUE; 5395 } 5396 5397 if ((tpte & PG_V) == 0) { 5398 panic("bad pte va %lx pte %lx", 5399 pv->pv_va, tpte); 5400 } 5401 5402/* 5403 * We cannot remove wired pages from a process' mapping at this time 5404 */ 5405 if (tpte & PG_W) { 5406 allfree = 0; 5407 continue; 5408 } 5409 5410 if (superpage) 5411 pa = tpte & PG_PS_FRAME; 5412 else 5413 pa = tpte & PG_FRAME; 5414 5415 m = PHYS_TO_VM_PAGE(pa); 5416 KASSERT(m->phys_addr == pa, 5417 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 5418 m, (uintmax_t)m->phys_addr, 5419 (uintmax_t)tpte)); 5420 5421 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 5422 m < &vm_page_array[vm_page_array_size], 5423 ("pmap_remove_pages: bad tpte %#jx", 5424 (uintmax_t)tpte)); 5425 5426 pte_clear(pte); 5427 5428 /* 5429 * Update the vm_page_t clean/reference bits. 
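 * A dirty superpage mapping dirties every 4KB page that it maps.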
5430 */ 5431 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5432 if (superpage) { 5433 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 5434 vm_page_dirty(mt); 5435 } else 5436 vm_page_dirty(m); 5437 } 5438 5439 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); 5440 5441 /* Mark free */ 5442 pc->pc_map[field] |= bitmask; 5443 if (superpage) { 5444 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); 5445 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 5446 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5447 pvh->pv_gen++; 5448 if (TAILQ_EMPTY(&pvh->pv_list)) { 5449 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 5450 if ((mt->aflags & PGA_WRITEABLE) != 0 && 5451 TAILQ_EMPTY(&mt->md.pv_list)) 5452 vm_page_aflag_clear(mt, PGA_WRITEABLE); 5453 } 5454 mpte = pmap_lookup_pt_page(pmap, pv->pv_va); 5455 if (mpte != NULL) { 5456 pmap_remove_pt_page(pmap, mpte); 5457 pmap_resident_count_dec(pmap, 1); 5458 KASSERT(mpte->wire_count == NPTEPG, 5459 ("pmap_remove_pages: pte page wire count error")); 5460 mpte->wire_count = 0; 5461 pmap_add_delayed_free_list(mpte, &free, FALSE); 5462 atomic_subtract_int(&cnt.v_wire_count, 1); 5463 } 5464 } else { 5465 pmap_resident_count_dec(pmap, 1); 5466 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5467 m->md.pv_gen++; 5468 if ((m->aflags & PGA_WRITEABLE) != 0 && 5469 TAILQ_EMPTY(&m->md.pv_list) && 5470 (m->flags & PG_FICTITIOUS) == 0) { 5471 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5472 if (TAILQ_EMPTY(&pvh->pv_list)) 5473 vm_page_aflag_clear(m, PGA_WRITEABLE); 5474 } 5475 } 5476 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free); 5477 freed++; 5478 } 5479 } 5480 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 5481 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 5482 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 5483 if (allfree) { 5484 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 5485 free_pv_chunk(pc); 5486 } 5487 } 5488 if (lock != NULL) 5489 rw_wunlock(lock); 5490 pmap_invalidate_all(pmap); 5491 rw_runlock(&pvh_global_lock); 5492 PMAP_UNLOCK(pmap); 5493 pmap_free_zero_pages(&free); 5494} 5495 5496static boolean_t 5497pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) 5498{ 5499 struct rwlock *lock; 5500 pv_entry_t pv; 5501 struct md_page *pvh; 5502 pt_entry_t *pte, mask; 5503 pt_entry_t PG_A, PG_M, PG_RW, PG_V; 5504 pmap_t pmap; 5505 int md_gen, pvh_gen; 5506 boolean_t rv; 5507 5508 rv = FALSE; 5509 rw_rlock(&pvh_global_lock); 5510 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5511 rw_rlock(lock); 5512restart: 5513 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5514 pmap = PV_PMAP(pv); 5515 if (!PMAP_TRYLOCK(pmap)) { 5516 md_gen = m->md.pv_gen; 5517 rw_runlock(lock); 5518 PMAP_LOCK(pmap); 5519 rw_rlock(lock); 5520 if (md_gen != m->md.pv_gen) { 5521 PMAP_UNLOCK(pmap); 5522 goto restart; 5523 } 5524 } 5525 pte = pmap_pte(pmap, pv->pv_va); 5526 mask = 0; 5527 if (modified) { 5528 PG_M = pmap_modified_bit(pmap); 5529 PG_RW = pmap_rw_bit(pmap); 5530 mask |= PG_RW | PG_M; 5531 } 5532 if (accessed) { 5533 PG_A = pmap_accessed_bit(pmap); 5534 PG_V = pmap_valid_bit(pmap); 5535 mask |= PG_V | PG_A; 5536 } 5537 rv = (*pte & mask) == mask; 5538 PMAP_UNLOCK(pmap); 5539 if (rv) 5540 goto out; 5541 } 5542 if ((m->flags & PG_FICTITIOUS) == 0) { 5543 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5544 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5545 pmap = PV_PMAP(pv); 5546 if (!PMAP_TRYLOCK(pmap)) { 5547 md_gen = m->md.pv_gen; 5548 pvh_gen = pvh->pv_gen; 5549 rw_runlock(lock); 5550 PMAP_LOCK(pmap); 5551 rw_rlock(lock); 5552 if (md_gen != m->md.pv_gen || 5553 pvh_gen != pvh->pv_gen) { 5554 
PMAP_UNLOCK(pmap); 5555 goto restart; 5556 } 5557 } 5558 pte = pmap_pde(pmap, pv->pv_va); 5559 mask = 0; 5560 if (modified) { 5561 PG_M = pmap_modified_bit(pmap); 5562 PG_RW = pmap_rw_bit(pmap); 5563 mask |= PG_RW | PG_M; 5564 } 5565 if (accessed) { 5566 PG_A = pmap_accessed_bit(pmap); 5567 PG_V = pmap_valid_bit(pmap); 5568 mask |= PG_V | PG_A; 5569 } 5570 rv = (*pte & mask) == mask; 5571 PMAP_UNLOCK(pmap); 5572 if (rv) 5573 goto out; 5574 } 5575 } 5576out: 5577 rw_runlock(lock); 5578 rw_runlock(&pvh_global_lock); 5579 return (rv); 5580} 5581 5582/* 5583 * pmap_is_modified: 5584 * 5585 * Return whether or not the specified physical page was modified 5586 * in any physical maps. 5587 */ 5588boolean_t 5589pmap_is_modified(vm_page_t m) 5590{ 5591 5592 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5593 ("pmap_is_modified: page %p is not managed", m)); 5594 5595 /* 5596 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5597 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 5598 * is clear, no PTEs can have PG_M set. 5599 */ 5600 VM_OBJECT_ASSERT_WLOCKED(m->object); 5601 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5602 return (FALSE); 5603 return (pmap_page_test_mappings(m, FALSE, TRUE)); 5604} 5605 5606/* 5607 * pmap_is_prefaultable: 5608 * 5609 * Return whether or not the specified virtual address is eligible 5610 * for prefault. 5611 */ 5612boolean_t 5613pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 5614{ 5615 pd_entry_t *pde; 5616 pt_entry_t *pte, PG_V; 5617 boolean_t rv; 5618 5619 PG_V = pmap_valid_bit(pmap); 5620 rv = FALSE; 5621 PMAP_LOCK(pmap); 5622 pde = pmap_pde(pmap, addr); 5623 if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) { 5624 pte = pmap_pde_to_pte(pde, addr); 5625 rv = (*pte & PG_V) == 0; 5626 } 5627 PMAP_UNLOCK(pmap); 5628 return (rv); 5629} 5630 5631/* 5632 * pmap_is_referenced: 5633 * 5634 * Return whether or not the specified physical page was referenced 5635 * in any physical maps. 5636 */ 5637boolean_t 5638pmap_is_referenced(vm_page_t m) 5639{ 5640 5641 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5642 ("pmap_is_referenced: page %p is not managed", m)); 5643 return (pmap_page_test_mappings(m, TRUE, FALSE)); 5644} 5645 5646/* 5647 * Clear the write and modified bits in each of the given page's mappings. 5648 */ 5649void 5650pmap_remove_write(vm_page_t m) 5651{ 5652 struct md_page *pvh; 5653 pmap_t pmap; 5654 struct rwlock *lock; 5655 pv_entry_t next_pv, pv; 5656 pd_entry_t *pde; 5657 pt_entry_t oldpte, *pte, PG_M, PG_RW; 5658 vm_offset_t va; 5659 int pvh_gen, md_gen; 5660 5661 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5662 ("pmap_remove_write: page %p is not managed", m)); 5663 5664 /* 5665 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5666 * set by another thread while the object is locked. Thus, 5667 * if PGA_WRITEABLE is clear, no page table entries need updating. 
5668 */ 5669 VM_OBJECT_ASSERT_WLOCKED(m->object); 5670 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5671 return; 5672 rw_rlock(&pvh_global_lock); 5673 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5674 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5675retry_pv_loop: 5676 rw_wlock(lock); 5677 if ((m->flags & PG_FICTITIOUS) != 0) 5678 goto small_mappings; 5679 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5680 pmap = PV_PMAP(pv); 5681 if (!PMAP_TRYLOCK(pmap)) { 5682 pvh_gen = pvh->pv_gen; 5683 rw_wunlock(lock); 5684 PMAP_LOCK(pmap); 5685 rw_wlock(lock); 5686 if (pvh_gen != pvh->pv_gen) { 5687 PMAP_UNLOCK(pmap); 5688 rw_wunlock(lock); 5689 goto retry_pv_loop; 5690 } 5691 } 5692 PG_RW = pmap_rw_bit(pmap); 5693 va = pv->pv_va; 5694 pde = pmap_pde(pmap, va); 5695 if ((*pde & PG_RW) != 0) 5696 (void)pmap_demote_pde_locked(pmap, pde, va, &lock); 5697 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 5698 ("inconsistent pv lock %p %p for page %p", 5699 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 5700 PMAP_UNLOCK(pmap); 5701 } 5702small_mappings: 5703 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5704 pmap = PV_PMAP(pv); 5705 if (!PMAP_TRYLOCK(pmap)) { 5706 pvh_gen = pvh->pv_gen; 5707 md_gen = m->md.pv_gen; 5708 rw_wunlock(lock); 5709 PMAP_LOCK(pmap); 5710 rw_wlock(lock); 5711 if (pvh_gen != pvh->pv_gen || 5712 md_gen != m->md.pv_gen) { 5713 PMAP_UNLOCK(pmap); 5714 rw_wunlock(lock); 5715 goto retry_pv_loop; 5716 } 5717 } 5718 PG_M = pmap_modified_bit(pmap); 5719 PG_RW = pmap_rw_bit(pmap); 5720 pde = pmap_pde(pmap, pv->pv_va); 5721 KASSERT((*pde & PG_PS) == 0, 5722 ("pmap_remove_write: found a 2mpage in page %p's pv list", 5723 m)); 5724 pte = pmap_pde_to_pte(pde, pv->pv_va); 5725retry: 5726 oldpte = *pte; 5727 if (oldpte & PG_RW) { 5728 if (!atomic_cmpset_long(pte, oldpte, oldpte & 5729 ~(PG_RW | PG_M))) 5730 goto retry; 5731 if ((oldpte & PG_M) != 0) 5732 vm_page_dirty(m); 5733 pmap_invalidate_page(pmap, pv->pv_va); 5734 } 5735 PMAP_UNLOCK(pmap); 5736 } 5737 rw_wunlock(lock); 5738 vm_page_aflag_clear(m, PGA_WRITEABLE); 5739 rw_runlock(&pvh_global_lock); 5740} 5741 5742static __inline boolean_t 5743safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) 5744{ 5745 5746 if (!pmap_emulate_ad_bits(pmap)) 5747 return (TRUE); 5748 5749 KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type)); 5750 5751 /* 5752 * RWX = 010 or 110 will cause an unconditional EPT misconfiguration 5753 * so we don't let the referenced (aka EPT_PG_READ) bit to be cleared 5754 * if the EPT_PG_WRITE bit is set. 5755 */ 5756 if ((pte & EPT_PG_WRITE) != 0) 5757 return (FALSE); 5758 5759 /* 5760 * RWX = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set. 5761 */ 5762 if ((pte & EPT_PG_EXECUTE) == 0 || 5763 ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0)) 5764 return (TRUE); 5765 else 5766 return (FALSE); 5767} 5768 5769#define PMAP_TS_REFERENCED_MAX 5 5770 5771/* 5772 * pmap_ts_referenced: 5773 * 5774 * Return a count of reference bits for a page, clearing those bits. 5775 * It is not necessary for every reference bit to be cleared, but it 5776 * is necessary that 0 only be returned when there are truly no 5777 * reference bits set. 5778 * 5779 * XXX: The exact number of bits to check and clear is a matter that 5780 * should be tested and standardized at some point in the future for 5781 * optimal aging of shared pages. 
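 * At present the scan stops once PMAP_TS_REFERENCED_MAX reference
 * bits have been found, whether or not each of them was cleared.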
5782 */ 5783int 5784pmap_ts_referenced(vm_page_t m) 5785{ 5786 struct md_page *pvh; 5787 pv_entry_t pv, pvf; 5788 pmap_t pmap; 5789 struct rwlock *lock; 5790 pd_entry_t oldpde, *pde; 5791 pt_entry_t *pte, PG_A; 5792 vm_offset_t va; 5793 vm_paddr_t pa; 5794 int cleared, md_gen, not_cleared, pvh_gen; 5795 struct spglist free; 5796 boolean_t demoted; 5797 5798 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5799 ("pmap_ts_referenced: page %p is not managed", m)); 5800 SLIST_INIT(&free); 5801 cleared = 0; 5802 pa = VM_PAGE_TO_PHYS(m); 5803 lock = PHYS_TO_PV_LIST_LOCK(pa); 5804 pvh = pa_to_pvh(pa); 5805 rw_rlock(&pvh_global_lock); 5806 rw_wlock(lock); 5807retry: 5808 not_cleared = 0; 5809 if ((m->flags & PG_FICTITIOUS) != 0 || 5810 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5811 goto small_mappings; 5812 pv = pvf; 5813 do { 5814 if (pvf == NULL) 5815 pvf = pv; 5816 pmap = PV_PMAP(pv); 5817 if (!PMAP_TRYLOCK(pmap)) { 5818 pvh_gen = pvh->pv_gen; 5819 rw_wunlock(lock); 5820 PMAP_LOCK(pmap); 5821 rw_wlock(lock); 5822 if (pvh_gen != pvh->pv_gen) { 5823 PMAP_UNLOCK(pmap); 5824 goto retry; 5825 } 5826 } 5827 PG_A = pmap_accessed_bit(pmap); 5828 va = pv->pv_va; 5829 pde = pmap_pde(pmap, pv->pv_va); 5830 oldpde = *pde; 5831 if ((*pde & PG_A) != 0) { 5832 /* 5833 * Since this reference bit is shared by 512 4KB 5834 * pages, it should not be cleared every time it is 5835 * tested. Apply a simple "hash" function on the 5836 * physical page number, the virtual superpage number, 5837 * and the pmap address to select one 4KB page out of 5838 * the 512 on which testing the reference bit will 5839 * result in clearing that reference bit. This 5840 * function is designed to avoid the selection of the 5841 * same 4KB page for every 2MB page mapping. 5842 * 5843 * On demotion, a mapping that hasn't been referenced 5844 * is simply destroyed. To avoid the possibility of a 5845 * subsequent page fault on a demoted wired mapping, 5846 * always leave its reference bit set. Moreover, 5847 * since the superpage is wired, the current state of 5848 * its reference bit won't affect page replacement. 5849 */ 5850 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5851 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5852 (*pde & PG_W) == 0) { 5853 if (safe_to_clear_referenced(pmap, oldpde)) { 5854 atomic_clear_long(pde, PG_A); 5855 pmap_invalidate_page(pmap, pv->pv_va); 5856 demoted = FALSE; 5857 } else if (pmap_demote_pde_locked(pmap, pde, 5858 pv->pv_va, &lock)) { 5859 /* 5860 * Remove the mapping to a single page 5861 * so that a subsequent access may 5862 * repromote. Since the underlying 5863 * page table page is fully populated, 5864 * this removal never frees a page 5865 * table page. 5866 */ 5867 demoted = TRUE; 5868 va += VM_PAGE_TO_PHYS(m) - (oldpde & 5869 PG_PS_FRAME); 5870 pte = pmap_pde_to_pte(pde, va); 5871 pmap_remove_pte(pmap, pte, va, *pde, 5872 NULL, &lock); 5873 pmap_invalidate_page(pmap, va); 5874 } else 5875 demoted = TRUE; 5876 5877 if (demoted) { 5878 /* 5879 * The superpage mapping was removed 5880 * entirely and therefore 'pv' is no 5881 * longer valid. 5882 */ 5883 if (pvf == pv) 5884 pvf = NULL; 5885 pv = NULL; 5886 } 5887 cleared++; 5888 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 5889 ("inconsistent pv lock %p %p for page %p", 5890 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 5891 } else 5892 not_cleared++; 5893 } 5894 PMAP_UNLOCK(pmap); 5895 /* Rotate the PV list if it has more than one entry. 
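 * Moving the examined entry to the tail spreads the work over all
 * of the page's mappings when the scan stops early at
 * PMAP_TS_REFERENCED_MAX.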
*/ 5896 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { 5897 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5898 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5899 pvh->pv_gen++; 5900 } 5901 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) 5902 goto out; 5903 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5904small_mappings: 5905 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5906 goto out; 5907 pv = pvf; 5908 do { 5909 if (pvf == NULL) 5910 pvf = pv; 5911 pmap = PV_PMAP(pv); 5912 if (!PMAP_TRYLOCK(pmap)) { 5913 pvh_gen = pvh->pv_gen; 5914 md_gen = m->md.pv_gen; 5915 rw_wunlock(lock); 5916 PMAP_LOCK(pmap); 5917 rw_wlock(lock); 5918 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 5919 PMAP_UNLOCK(pmap); 5920 goto retry; 5921 } 5922 } 5923 PG_A = pmap_accessed_bit(pmap); 5924 pde = pmap_pde(pmap, pv->pv_va); 5925 KASSERT((*pde & PG_PS) == 0, 5926 ("pmap_ts_referenced: found a 2mpage in page %p's pv list", 5927 m)); 5928 pte = pmap_pde_to_pte(pde, pv->pv_va); 5929 if ((*pte & PG_A) != 0) { 5930 if (safe_to_clear_referenced(pmap, *pte)) { 5931 atomic_clear_long(pte, PG_A); 5932 pmap_invalidate_page(pmap, pv->pv_va); 5933 cleared++; 5934 } else if ((*pte & PG_W) == 0) { 5935 /* 5936 * Wired pages cannot be paged out so 5937 * doing accessed bit emulation for 5938 * them is wasted effort. We do the 5939 * hard work for unwired pages only. 5940 */ 5941 pmap_remove_pte(pmap, pte, pv->pv_va, 5942 *pde, &free, &lock); 5943 pmap_invalidate_page(pmap, pv->pv_va); 5944 cleared++; 5945 if (pvf == pv) 5946 pvf = NULL; 5947 pv = NULL; 5948 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 5949 ("inconsistent pv lock %p %p for page %p", 5950 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 5951 } else 5952 not_cleared++; 5953 } 5954 PMAP_UNLOCK(pmap); 5955 /* Rotate the PV list if it has more than one entry. */ 5956 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { 5957 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5958 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5959 m->md.pv_gen++; 5960 } 5961 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + 5962 not_cleared < PMAP_TS_REFERENCED_MAX); 5963out: 5964 rw_wunlock(lock); 5965 rw_runlock(&pvh_global_lock); 5966 pmap_free_zero_pages(&free); 5967 return (cleared + not_cleared); 5968} 5969 5970/* 5971 * Apply the given advice to the specified range of addresses within the 5972 * given pmap. Depending on the advice, clear the referenced and/or 5973 * modified flags in each mapping and set the mapped page's dirty field. 5974 */ 5975void 5976pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5977{ 5978 struct rwlock *lock; 5979 pml4_entry_t *pml4e; 5980 pdp_entry_t *pdpe; 5981 pd_entry_t oldpde, *pde; 5982 pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V; 5983 vm_offset_t va_next; 5984 vm_page_t m; 5985 boolean_t anychanged, pv_lists_locked; 5986 5987 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5988 return; 5989 5990 /* 5991 * A/D bit emulation requires an alternate code path when clearing 5992 * the modified and accessed bits below. Since this function is 5993 * advisory in nature we skip it entirely for pmaps that require 5994 * A/D bit emulation. 
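 *
 * For pmaps that do use hardware A/D bits, a call such as
 *
 *	pmap_advise(pmap, sva, eva, MADV_FREE);
 *
 * roughly amounts to clearing PG_A, and for dirty writable mappings
 * PG_M as well, from the managed 4KB mappings in [sva, eva), demoting
 * 2MB mappings along the way.  Skipping the operation entirely for
 * A/D-emulating pmaps therefore costs only the quality of the hint,
 * never correctness.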
5995 */ 5996 if (pmap_emulate_ad_bits(pmap)) 5997 return; 5998 5999 PG_A = pmap_accessed_bit(pmap); 6000 PG_G = pmap_global_bit(pmap); 6001 PG_M = pmap_modified_bit(pmap); 6002 PG_V = pmap_valid_bit(pmap); 6003 PG_RW = pmap_rw_bit(pmap); 6004 6005 pv_lists_locked = FALSE; 6006resume: 6007 anychanged = FALSE; 6008 PMAP_LOCK(pmap); 6009 for (; sva < eva; sva = va_next) { 6010 pml4e = pmap_pml4e(pmap, sva); 6011 if ((*pml4e & PG_V) == 0) { 6012 va_next = (sva + NBPML4) & ~PML4MASK; 6013 if (va_next < sva) 6014 va_next = eva; 6015 continue; 6016 } 6017 pdpe = pmap_pml4e_to_pdpe(pml4e, sva); 6018 if ((*pdpe & PG_V) == 0) { 6019 va_next = (sva + NBPDP) & ~PDPMASK; 6020 if (va_next < sva) 6021 va_next = eva; 6022 continue; 6023 } 6024 va_next = (sva + NBPDR) & ~PDRMASK; 6025 if (va_next < sva) 6026 va_next = eva; 6027 pde = pmap_pdpe_to_pde(pdpe, sva); 6028 oldpde = *pde; 6029 if ((oldpde & PG_V) == 0) 6030 continue; 6031 else if ((oldpde & PG_PS) != 0) { 6032 if ((oldpde & PG_MANAGED) == 0) 6033 continue; 6034 if (!pv_lists_locked) { 6035 pv_lists_locked = TRUE; 6036 if (!rw_try_rlock(&pvh_global_lock)) { 6037 if (anychanged) 6038 pmap_invalidate_all(pmap); 6039 PMAP_UNLOCK(pmap); 6040 rw_rlock(&pvh_global_lock); 6041 goto resume; 6042 } 6043 } 6044 lock = NULL; 6045 if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) { 6046 if (lock != NULL) 6047 rw_wunlock(lock); 6048 6049 /* 6050 * The large page mapping was destroyed. 6051 */ 6052 continue; 6053 } 6054 6055 /* 6056 * Unless the page mappings are wired, remove the 6057 * mapping to a single page so that a subsequent 6058 * access may repromote. Since the underlying page 6059 * table page is fully populated, this removal never 6060 * frees a page table page. 6061 */ 6062 if ((oldpde & PG_W) == 0) { 6063 pte = pmap_pde_to_pte(pde, sva); 6064 KASSERT((*pte & PG_V) != 0, 6065 ("pmap_advise: invalid PTE")); 6066 pmap_remove_pte(pmap, pte, sva, *pde, NULL, 6067 &lock); 6068 anychanged = TRUE; 6069 } 6070 if (lock != NULL) 6071 rw_wunlock(lock); 6072 } 6073 if (va_next > eva) 6074 va_next = eva; 6075 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 6076 sva += PAGE_SIZE) { 6077 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | 6078 PG_V)) 6079 continue; 6080 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 6081 if (advice == MADV_DONTNEED) { 6082 /* 6083 * Future calls to pmap_is_modified() 6084 * can be avoided by making the page 6085 * dirty now. 6086 */ 6087 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 6088 vm_page_dirty(m); 6089 } 6090 atomic_clear_long(pte, PG_M | PG_A); 6091 } else if ((*pte & PG_A) != 0) 6092 atomic_clear_long(pte, PG_A); 6093 else 6094 continue; 6095 if ((*pte & PG_G) != 0) 6096 pmap_invalidate_page(pmap, sva); 6097 else 6098 anychanged = TRUE; 6099 } 6100 } 6101 if (anychanged) 6102 pmap_invalidate_all(pmap); 6103 if (pv_lists_locked) 6104 rw_runlock(&pvh_global_lock); 6105 PMAP_UNLOCK(pmap); 6106} 6107 6108/* 6109 * Clear the modify bits on the specified physical page. 
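 *
 * A minimal calling sketch (hypothetical caller; locking shown, all
 * other work omitted):
 *
 *	VM_OBJECT_WLOCK(object);
 *	...
 *	pmap_clear_modify(m);
 *	vm_page_undirty(m);
 *	...
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Holding the object write lock while the page is not exclusive busied
 * is what makes the unlocked PGA_WRITEABLE test below safe.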
6110 */ 6111void 6112pmap_clear_modify(vm_page_t m) 6113{ 6114 struct md_page *pvh; 6115 pmap_t pmap; 6116 pv_entry_t next_pv, pv; 6117 pd_entry_t oldpde, *pde; 6118 pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V; 6119 struct rwlock *lock; 6120 vm_offset_t va; 6121 int md_gen, pvh_gen; 6122 6123 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 6124 ("pmap_clear_modify: page %p is not managed", m)); 6125 VM_OBJECT_ASSERT_WLOCKED(m->object); 6126 KASSERT(!vm_page_xbusied(m), 6127 ("pmap_clear_modify: page %p is exclusive busied", m)); 6128 6129 /* 6130 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 6131 * If the object containing the page is locked and the page is not 6132 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 6133 */ 6134 if ((m->aflags & PGA_WRITEABLE) == 0) 6135 return; 6136 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 6137 rw_rlock(&pvh_global_lock); 6138 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 6139 rw_wlock(lock); 6140restart: 6141 if ((m->flags & PG_FICTITIOUS) != 0) 6142 goto small_mappings; 6143 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 6144 pmap = PV_PMAP(pv); 6145 if (!PMAP_TRYLOCK(pmap)) { 6146 pvh_gen = pvh->pv_gen; 6147 rw_wunlock(lock); 6148 PMAP_LOCK(pmap); 6149 rw_wlock(lock); 6150 if (pvh_gen != pvh->pv_gen) { 6151 PMAP_UNLOCK(pmap); 6152 goto restart; 6153 } 6154 } 6155 PG_M = pmap_modified_bit(pmap); 6156 PG_V = pmap_valid_bit(pmap); 6157 PG_RW = pmap_rw_bit(pmap); 6158 va = pv->pv_va; 6159 pde = pmap_pde(pmap, va); 6160 oldpde = *pde; 6161 if ((oldpde & PG_RW) != 0) { 6162 if (pmap_demote_pde_locked(pmap, pde, va, &lock)) { 6163 if ((oldpde & PG_W) == 0) { 6164 /* 6165 * Write protect the mapping to a 6166 * single page so that a subsequent 6167 * write access may repromote. 6168 */ 6169 va += VM_PAGE_TO_PHYS(m) - (oldpde & 6170 PG_PS_FRAME); 6171 pte = pmap_pde_to_pte(pde, va); 6172 oldpte = *pte; 6173 if ((oldpte & PG_V) != 0) { 6174 while (!atomic_cmpset_long(pte, 6175 oldpte, 6176 oldpte & ~(PG_M | PG_RW))) 6177 oldpte = *pte; 6178 vm_page_dirty(m); 6179 pmap_invalidate_page(pmap, va); 6180 } 6181 } 6182 } 6183 } 6184 PMAP_UNLOCK(pmap); 6185 } 6186small_mappings: 6187 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 6188 pmap = PV_PMAP(pv); 6189 if (!PMAP_TRYLOCK(pmap)) { 6190 md_gen = m->md.pv_gen; 6191 pvh_gen = pvh->pv_gen; 6192 rw_wunlock(lock); 6193 PMAP_LOCK(pmap); 6194 rw_wlock(lock); 6195 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 6196 PMAP_UNLOCK(pmap); 6197 goto restart; 6198 } 6199 } 6200 PG_M = pmap_modified_bit(pmap); 6201 PG_RW = pmap_rw_bit(pmap); 6202 pde = pmap_pde(pmap, pv->pv_va); 6203 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 6204 " a 2mpage in page %p's pv list", m)); 6205 pte = pmap_pde_to_pte(pde, pv->pv_va); 6206 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 6207 atomic_clear_long(pte, PG_M); 6208 pmap_invalidate_page(pmap, pv->pv_va); 6209 } 6210 PMAP_UNLOCK(pmap); 6211 } 6212 rw_wunlock(lock); 6213 rw_runlock(&pvh_global_lock); 6214} 6215 6216/* 6217 * Miscellaneous support routines follow 6218 */ 6219 6220/* Adjust the cache mode for a 4KB page mapped via a PTE. */ 6221static __inline void 6222pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask) 6223{ 6224 u_int opte, npte; 6225 6226 /* 6227 * The cache mode bits are all in the low 32-bits of the 6228 * PTE, so we can just spin on updating the low 32-bits. 
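 *
 * Put differently, "mask" is expected to be X86_PG_PTE_CACHE, i.e. the
 * PAT, PCD and PWT bits, all of which lie below bit 32, so a 32-bit
 * compare-and-swap of the form
 *
 *	atomic_cmpset_int((u_int *)pte, opte, (opte & ~mask) | cache_bits);
 *
 * changes only the cache-control bits; every other bit of the entry,
 * including the page frame number, is preserved.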
6229 */ 6230 do { 6231 opte = *(u_int *)pte; 6232 npte = opte & ~mask; 6233 npte |= cache_bits; 6234 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 6235} 6236 6237/* Adjust the cache mode for a 2MB page mapped via a PDE. */ 6238static __inline void 6239pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask) 6240{ 6241 u_int opde, npde; 6242 6243 /* 6244 * The cache mode bits are all in the low 32-bits of the 6245 * PDE, so we can just spin on updating the low 32-bits. 6246 */ 6247 do { 6248 opde = *(u_int *)pde; 6249 npde = opde & ~mask; 6250 npde |= cache_bits; 6251 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 6252} 6253 6254/* 6255 * Map a set of physical memory pages into the kernel virtual 6256 * address space. Return a pointer to where it is mapped. This 6257 * routine is intended to be used for mapping device memory, 6258 * NOT real memory. 6259 */ 6260void * 6261pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 6262{ 6263 struct pmap_preinit_mapping *ppim; 6264 vm_offset_t va, offset; 6265 vm_size_t tmpsize; 6266 int i; 6267 6268 offset = pa & PAGE_MASK; 6269 size = round_page(offset + size); 6270 pa = trunc_page(pa); 6271 6272 if (!pmap_initialized) { 6273 va = 0; 6274 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 6275 ppim = pmap_preinit_mapping + i; 6276 if (ppim->va == 0) { 6277 ppim->pa = pa; 6278 ppim->sz = size; 6279 ppim->mode = mode; 6280 ppim->va = virtual_avail; 6281 virtual_avail += size; 6282 va = ppim->va; 6283 break; 6284 } 6285 } 6286 if (va == 0) 6287 panic("%s: too many preinit mappings", __func__); 6288 } else { 6289 /* 6290 * If we have a preinit mapping, re-use it. 6291 */ 6292 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 6293 ppim = pmap_preinit_mapping + i; 6294 if (ppim->pa == pa && ppim->sz == size && 6295 ppim->mode == mode) 6296 return ((void *)(ppim->va + offset)); 6297 } 6298 /* 6299 * If the specified range of physical addresses fits within 6300 * the direct map window, use the direct map. 
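 *
 * In that case no KVA is allocated: the existing mapping at
 * PHYS_TO_DMAP(pa) is reused and only its memory attributes are
 * adjusted with pmap_change_attr().  The kva_alloc() below is reached
 * only when the range does not fit or the attribute change fails.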
6301 */ 6302 if (pa < dmaplimit && pa + size < dmaplimit) { 6303 va = PHYS_TO_DMAP(pa); 6304 if (!pmap_change_attr(va, size, mode)) 6305 return ((void *)(va + offset)); 6306 } 6307 va = kva_alloc(size); 6308 if (va == 0) 6309 panic("%s: Couldn't allocate KVA", __func__); 6310 } 6311 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 6312 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 6313 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 6314 pmap_invalidate_cache_range(va, va + tmpsize, FALSE); 6315 return ((void *)(va + offset)); 6316} 6317 6318void * 6319pmap_mapdev(vm_paddr_t pa, vm_size_t size) 6320{ 6321 6322 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 6323} 6324 6325void * 6326pmap_mapbios(vm_paddr_t pa, vm_size_t size) 6327{ 6328 6329 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 6330} 6331 6332void 6333pmap_unmapdev(vm_offset_t va, vm_size_t size) 6334{ 6335 struct pmap_preinit_mapping *ppim; 6336 vm_offset_t offset; 6337 int i; 6338 6339 /* If we gave a direct map region in pmap_mapdev, do nothing */ 6340 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) 6341 return; 6342 offset = va & PAGE_MASK; 6343 size = round_page(offset + size); 6344 va = trunc_page(va); 6345 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 6346 ppim = pmap_preinit_mapping + i; 6347 if (ppim->va == va && ppim->sz == size) { 6348 if (pmap_initialized) 6349 return; 6350 ppim->pa = 0; 6351 ppim->va = 0; 6352 ppim->sz = 0; 6353 ppim->mode = 0; 6354 if (va + size == virtual_avail) 6355 virtual_avail = va; 6356 return; 6357 } 6358 } 6359 if (pmap_initialized) 6360 kva_free(va, size); 6361} 6362 6363/* 6364 * Tries to demote a 1GB page mapping. 6365 */ 6366static boolean_t 6367pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va) 6368{ 6369 pdp_entry_t newpdpe, oldpdpe; 6370 pd_entry_t *firstpde, newpde, *pde; 6371 pt_entry_t PG_A, PG_M, PG_RW, PG_V; 6372 vm_paddr_t mpdepa; 6373 vm_page_t mpde; 6374 6375 PG_A = pmap_accessed_bit(pmap); 6376 PG_M = pmap_modified_bit(pmap); 6377 PG_V = pmap_valid_bit(pmap); 6378 PG_RW = pmap_rw_bit(pmap); 6379 6380 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 6381 oldpdpe = *pdpe; 6382 KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V), 6383 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V")); 6384 if ((mpde = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT | 6385 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 6386 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" 6387 " in pmap %p", va, pmap); 6388 return (FALSE); 6389 } 6390 mpdepa = VM_PAGE_TO_PHYS(mpde); 6391 firstpde = (pd_entry_t *)PHYS_TO_DMAP(mpdepa); 6392 newpdpe = mpdepa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V; 6393 KASSERT((oldpdpe & PG_A) != 0, 6394 ("pmap_demote_pdpe: oldpdpe is missing PG_A")); 6395 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW, 6396 ("pmap_demote_pdpe: oldpdpe is missing PG_M")); 6397 newpde = oldpdpe; 6398 6399 /* 6400 * Initialize the page directory page. 6401 */ 6402 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) { 6403 *pde = newpde; 6404 newpde += NBPDR; 6405 } 6406 6407 /* 6408 * Demote the mapping. 6409 */ 6410 *pdpe = newpdpe; 6411 6412 /* 6413 * Invalidate a stale recursive mapping of the page directory page. 6414 */ 6415 pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va)); 6416 6417 pmap_pdpe_demotions++; 6418 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" 6419 " in pmap %p", va, pmap); 6420 return (TRUE); 6421} 6422 6423/* 6424 * Sets the memory attribute for the specified page. 
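 *
 * Hypothetical usage sketch (not taken from this file; error handling
 * omitted): a driver that wants a page mapped write-combining before
 * handing it out might do
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 *	pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
 *
 * after which the page's direct map entry, and any mapping later created
 * for it, uses the new attribute.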
6425 */ 6426void 6427pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 6428{ 6429 6430 m->md.pat_mode = ma; 6431 6432 /* 6433 * If "m" is a normal page, update its direct mapping. This update 6434 * can be relied upon to perform any cache operations that are 6435 * required for data coherence. 6436 */ 6437 if ((m->flags & PG_FICTITIOUS) == 0 && 6438 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE, 6439 m->md.pat_mode)) 6440 panic("memory attribute change on the direct map failed"); 6441} 6442 6443/* 6444 * Changes the specified virtual address range's memory type to that given by 6445 * the parameter "mode". The specified virtual address range must be 6446 * completely contained within either the direct map or the kernel map. If 6447 * the virtual address range is contained within the kernel map, then the 6448 * memory type for each of the corresponding ranges of the direct map is also 6449 * changed. (The corresponding ranges of the direct map are those ranges that 6450 * map the same physical pages as the specified virtual address range.) These 6451 * changes to the direct map are necessary because Intel describes the 6452 * behavior of their processors as "undefined" if two or more mappings to the 6453 * same physical page have different memory types. 6454 * 6455 * Returns zero if the change completed successfully, and either EINVAL or 6456 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 6457 * of the virtual address range was not mapped, and ENOMEM is returned if 6458 * there was insufficient memory available to complete the change. In the 6459 * latter case, the memory type may have been changed on some part of the 6460 * virtual address range or the direct map. 6461 */ 6462int 6463pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 6464{ 6465 int error; 6466 6467 PMAP_LOCK(kernel_pmap); 6468 error = pmap_change_attr_locked(va, size, mode); 6469 PMAP_UNLOCK(kernel_pmap); 6470 return (error); 6471} 6472 6473static int 6474pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode) 6475{ 6476 vm_offset_t base, offset, tmpva; 6477 vm_paddr_t pa_start, pa_end; 6478 pdp_entry_t *pdpe; 6479 pd_entry_t *pde; 6480 pt_entry_t *pte; 6481 int cache_bits_pte, cache_bits_pde, error; 6482 boolean_t changed; 6483 6484 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED); 6485 base = trunc_page(va); 6486 offset = va & PAGE_MASK; 6487 size = round_page(offset + size); 6488 6489 /* 6490 * Only supported on kernel virtual addresses, including the direct 6491 * map but excluding the recursive map. 6492 */ 6493 if (base < DMAP_MIN_ADDRESS) 6494 return (EINVAL); 6495 6496 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); 6497 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); 6498 changed = FALSE; 6499 6500 /* 6501 * Pages that aren't mapped aren't supported. Also break down 2MB pages 6502 * into 4KB pages if required. 6503 */ 6504 for (tmpva = base; tmpva < base + size; ) { 6505 pdpe = pmap_pdpe(kernel_pmap, tmpva); 6506 if (*pdpe == 0) 6507 return (EINVAL); 6508 if (*pdpe & PG_PS) { 6509 /* 6510 * If the current 1GB page already has the required 6511 * memory type, then we need not demote this page. Just 6512 * increment tmpva to the next 1GB page frame. 
6513 */ 6514 if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) { 6515 tmpva = trunc_1gpage(tmpva) + NBPDP; 6516 continue; 6517 } 6518 6519 /* 6520 * If the current offset aligns with a 1GB page frame 6521 * and there is at least 1GB left within the range, then 6522 * we need not break down this page into 2MB pages. 6523 */ 6524 if ((tmpva & PDPMASK) == 0 && 6525 tmpva + PDPMASK < base + size) { 6526 tmpva += NBPDP; 6527 continue; 6528 } 6529 if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva)) 6530 return (ENOMEM); 6531 } 6532 pde = pmap_pdpe_to_pde(pdpe, tmpva); 6533 if (*pde == 0) 6534 return (EINVAL); 6535 if (*pde & PG_PS) { 6536 /* 6537 * If the current 2MB page already has the required 6538 * memory type, then we need not demote this page. Just 6539 * increment tmpva to the next 2MB page frame. 6540 */ 6541 if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) { 6542 tmpva = trunc_2mpage(tmpva) + NBPDR; 6543 continue; 6544 } 6545 6546 /* 6547 * If the current offset aligns with a 2MB page frame 6548 * and there is at least 2MB left within the range, then 6549 * we need not break down this page into 4KB pages. 6550 */ 6551 if ((tmpva & PDRMASK) == 0 && 6552 tmpva + PDRMASK < base + size) { 6553 tmpva += NBPDR; 6554 continue; 6555 } 6556 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) 6557 return (ENOMEM); 6558 } 6559 pte = pmap_pde_to_pte(pde, tmpva); 6560 if (*pte == 0) 6561 return (EINVAL); 6562 tmpva += PAGE_SIZE; 6563 } 6564 error = 0; 6565 6566 /* 6567 * Ok, all the pages exist, so run through them updating their 6568 * cache mode if required. 6569 */ 6570 pa_start = pa_end = 0; 6571 for (tmpva = base; tmpva < base + size; ) { 6572 pdpe = pmap_pdpe(kernel_pmap, tmpva); 6573 if (*pdpe & PG_PS) { 6574 if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) { 6575 pmap_pde_attr(pdpe, cache_bits_pde, 6576 X86_PG_PDE_CACHE); 6577 changed = TRUE; 6578 } 6579 if (tmpva >= VM_MIN_KERNEL_ADDRESS) { 6580 if (pa_start == pa_end) { 6581 /* Start physical address run. */ 6582 pa_start = *pdpe & PG_PS_FRAME; 6583 pa_end = pa_start + NBPDP; 6584 } else if (pa_end == (*pdpe & PG_PS_FRAME)) 6585 pa_end += NBPDP; 6586 else { 6587 /* Run ended, update direct map. */ 6588 error = pmap_change_attr_locked( 6589 PHYS_TO_DMAP(pa_start), 6590 pa_end - pa_start, mode); 6591 if (error != 0) 6592 break; 6593 /* Start physical address run. */ 6594 pa_start = *pdpe & PG_PS_FRAME; 6595 pa_end = pa_start + NBPDP; 6596 } 6597 } 6598 tmpva = trunc_1gpage(tmpva) + NBPDP; 6599 continue; 6600 } 6601 pde = pmap_pdpe_to_pde(pdpe, tmpva); 6602 if (*pde & PG_PS) { 6603 if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) { 6604 pmap_pde_attr(pde, cache_bits_pde, 6605 X86_PG_PDE_CACHE); 6606 changed = TRUE; 6607 } 6608 if (tmpva >= VM_MIN_KERNEL_ADDRESS) { 6609 if (pa_start == pa_end) { 6610 /* Start physical address run. */ 6611 pa_start = *pde & PG_PS_FRAME; 6612 pa_end = pa_start + NBPDR; 6613 } else if (pa_end == (*pde & PG_PS_FRAME)) 6614 pa_end += NBPDR; 6615 else { 6616 /* Run ended, update direct map. */ 6617 error = pmap_change_attr_locked( 6618 PHYS_TO_DMAP(pa_start), 6619 pa_end - pa_start, mode); 6620 if (error != 0) 6621 break; 6622 /* Start physical address run. 
*/ 6623 pa_start = *pde & PG_PS_FRAME; 6624 pa_end = pa_start + NBPDR; 6625 } 6626 } 6627 tmpva = trunc_2mpage(tmpva) + NBPDR; 6628 } else { 6629 pte = pmap_pde_to_pte(pde, tmpva); 6630 if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) { 6631 pmap_pte_attr(pte, cache_bits_pte, 6632 X86_PG_PTE_CACHE); 6633 changed = TRUE; 6634 } 6635 if (tmpva >= VM_MIN_KERNEL_ADDRESS) { 6636 if (pa_start == pa_end) { 6637 /* Start physical address run. */ 6638 pa_start = *pte & PG_FRAME; 6639 pa_end = pa_start + PAGE_SIZE; 6640 } else if (pa_end == (*pte & PG_FRAME)) 6641 pa_end += PAGE_SIZE; 6642 else { 6643 /* Run ended, update direct map. */ 6644 error = pmap_change_attr_locked( 6645 PHYS_TO_DMAP(pa_start), 6646 pa_end - pa_start, mode); 6647 if (error != 0) 6648 break; 6649 /* Start physical address run. */ 6650 pa_start = *pte & PG_FRAME; 6651 pa_end = pa_start + PAGE_SIZE; 6652 } 6653 } 6654 tmpva += PAGE_SIZE; 6655 } 6656 } 6657 if (error == 0 && pa_start != pa_end) 6658 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start), 6659 pa_end - pa_start, mode); 6660 6661 /* 6662 * Flush CPU caches if required to make sure any data isn't cached that 6663 * shouldn't be, etc. 6664 */ 6665 if (changed) { 6666 pmap_invalidate_range(kernel_pmap, base, tmpva); 6667 pmap_invalidate_cache_range(base, tmpva, FALSE); 6668 } 6669 return (error); 6670} 6671 6672/* 6673 * Demotes any mapping within the direct map region that covers more than the 6674 * specified range of physical addresses. This range's size must be a power 6675 * of two and its starting address must be a multiple of its size. Since the 6676 * demotion does not change any attributes of the mapping, a TLB invalidation 6677 * is not mandatory. The caller may, however, request a TLB invalidation. 6678 */ 6679void 6680pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate) 6681{ 6682 pdp_entry_t *pdpe; 6683 pd_entry_t *pde; 6684 vm_offset_t va; 6685 boolean_t changed; 6686 6687 if (len == 0) 6688 return; 6689 KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2")); 6690 KASSERT((base & (len - 1)) == 0, 6691 ("pmap_demote_DMAP: base is not a multiple of len")); 6692 if (len < NBPDP && base < dmaplimit) { 6693 va = PHYS_TO_DMAP(base); 6694 changed = FALSE; 6695 PMAP_LOCK(kernel_pmap); 6696 pdpe = pmap_pdpe(kernel_pmap, va); 6697 if ((*pdpe & X86_PG_V) == 0) 6698 panic("pmap_demote_DMAP: invalid PDPE"); 6699 if ((*pdpe & PG_PS) != 0) { 6700 if (!pmap_demote_pdpe(kernel_pmap, pdpe, va)) 6701 panic("pmap_demote_DMAP: PDPE failed"); 6702 changed = TRUE; 6703 } 6704 if (len < NBPDR) { 6705 pde = pmap_pdpe_to_pde(pdpe, va); 6706 if ((*pde & X86_PG_V) == 0) 6707 panic("pmap_demote_DMAP: invalid PDE"); 6708 if ((*pde & PG_PS) != 0) { 6709 if (!pmap_demote_pde(kernel_pmap, pde, va)) 6710 panic("pmap_demote_DMAP: PDE failed"); 6711 changed = TRUE; 6712 } 6713 } 6714 if (changed && invalidate) 6715 pmap_invalidate_page(kernel_pmap, va); 6716 PMAP_UNLOCK(kernel_pmap); 6717 } 6718} 6719 6720/* 6721 * perform the pmap work for mincore 6722 */ 6723int 6724pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 6725{ 6726 pd_entry_t *pdep; 6727 pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V; 6728 vm_paddr_t pa; 6729 int val; 6730 6731 PG_A = pmap_accessed_bit(pmap); 6732 PG_M = pmap_modified_bit(pmap); 6733 PG_V = pmap_valid_bit(pmap); 6734 PG_RW = pmap_rw_bit(pmap); 6735 6736 PMAP_LOCK(pmap); 6737retry: 6738 pdep = pmap_pde(pmap, addr); 6739 if (pdep != NULL && (*pdep & PG_V)) { 6740 if (*pdep & PG_PS) { 6741 pte = *pdep; 6742 /* Compute 
the physical address of the 4KB page. */ 6743 pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) & 6744 PG_FRAME; 6745 val = MINCORE_SUPER; 6746 } else { 6747 pte = *pmap_pde_to_pte(pdep, addr); 6748 pa = pte & PG_FRAME; 6749 val = 0; 6750 } 6751 } else { 6752 pte = 0; 6753 pa = 0; 6754 val = 0; 6755 } 6756 if ((pte & PG_V) != 0) { 6757 val |= MINCORE_INCORE; 6758 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 6759 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6760 if ((pte & PG_A) != 0) 6761 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6762 } 6763 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6764 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 6765 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 6766 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 6767 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 6768 goto retry; 6769 } else 6770 PA_UNLOCK_COND(*locked_pa); 6771 PMAP_UNLOCK(pmap); 6772 return (val); 6773} 6774 6775void 6776pmap_activate(struct thread *td) 6777{ 6778 pmap_t pmap, oldpmap; 6779 u_int cpuid; 6780 6781 critical_enter(); 6782 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6783 oldpmap = PCPU_GET(curpmap); 6784 cpuid = PCPU_GET(cpuid); 6785#ifdef SMP 6786 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6787 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6788 CPU_SET_ATOMIC(cpuid, &pmap->pm_save); 6789#else 6790 CPU_CLR(cpuid, &oldpmap->pm_active); 6791 CPU_SET(cpuid, &pmap->pm_active); 6792 CPU_SET(cpuid, &pmap->pm_save); 6793#endif 6794 td->td_pcb->pcb_cr3 = pmap->pm_cr3; 6795 load_cr3(pmap->pm_cr3); 6796 PCPU_SET(curpmap, pmap); 6797 critical_exit(); 6798} 6799 6800void 6801pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 6802{ 6803} 6804 6805/* 6806 * Increase the starting virtual address of the given mapping if a 6807 * different alignment might result in more superpage mappings. 
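 *
 * A small worked example (illustrative numbers only): with 2MB
 * superpages, a sufficiently large request to map file offset 0x1ff000
 * at a 2MB-aligned *addr is shifted so that
 * (*addr & PDRMASK) == (offset & PDRMASK) == 0x1ff000.  Object pages
 * that are 2MB-aligned within the file then land on 2MB-aligned virtual
 * addresses, which is what later promotion to PG_PS mappings requires.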
6808 */ 6809void 6810pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6811 vm_offset_t *addr, vm_size_t size) 6812{ 6813 vm_offset_t superpage_offset; 6814 6815 if (size < NBPDR) 6816 return; 6817 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6818 offset += ptoa(object->pg_color); 6819 superpage_offset = offset & PDRMASK; 6820 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 6821 (*addr & PDRMASK) == superpage_offset) 6822 return; 6823 if ((*addr & PDRMASK) < superpage_offset) 6824 *addr = (*addr & ~PDRMASK) + superpage_offset; 6825 else 6826 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 6827} 6828 6829#ifdef INVARIANTS 6830static unsigned long num_dirty_emulations; 6831SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW, 6832 &num_dirty_emulations, 0, NULL); 6833 6834static unsigned long num_accessed_emulations; 6835SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW, 6836 &num_accessed_emulations, 0, NULL); 6837 6838static unsigned long num_superpage_accessed_emulations; 6839SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW, 6840 &num_superpage_accessed_emulations, 0, NULL); 6841 6842static unsigned long ad_emulation_superpage_promotions; 6843SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW, 6844 &ad_emulation_superpage_promotions, 0, NULL); 6845#endif /* INVARIANTS */ 6846 6847int 6848pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype) 6849{ 6850 int rv; 6851 struct rwlock *lock; 6852 vm_page_t m, mpte; 6853 pd_entry_t *pde; 6854 pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V; 6855 boolean_t pv_lists_locked; 6856 6857 KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE, 6858 ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype)); 6859 6860 if (!pmap_emulate_ad_bits(pmap)) 6861 return (-1); 6862 6863 PG_A = pmap_accessed_bit(pmap); 6864 PG_M = pmap_modified_bit(pmap); 6865 PG_V = pmap_valid_bit(pmap); 6866 PG_RW = pmap_rw_bit(pmap); 6867 6868 rv = -1; 6869 lock = NULL; 6870 pv_lists_locked = FALSE; 6871retry: 6872 PMAP_LOCK(pmap); 6873 6874 pde = pmap_pde(pmap, va); 6875 if (pde == NULL || (*pde & PG_V) == 0) 6876 goto done; 6877 6878 if ((*pde & PG_PS) != 0) { 6879 if (ftype == VM_PROT_READ) { 6880#ifdef INVARIANTS 6881 atomic_add_long(&num_superpage_accessed_emulations, 1); 6882#endif 6883 *pde |= PG_A; 6884 rv = 0; 6885 } 6886 goto done; 6887 } 6888 6889 pte = pmap_pde_to_pte(pde, va); 6890 if ((*pte & PG_V) == 0) 6891 goto done; 6892 6893 if (ftype == VM_PROT_WRITE) { 6894 if ((*pte & PG_RW) == 0) 6895 goto done; 6896 /* 6897 * Set the modified and accessed bits simultaneously. 6898 * 6899 * Intel EPT PTEs that do software emulation of A/D bits map 6900 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively. 6901 * An EPT misconfiguration is triggered if the PTE is writable 6902 * but not readable (WR=10). This is avoided by setting PG_A 6903 * and PG_M simultaneously. 
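 *
 * Concretely, with A/D emulation in effect the assignment below is
 * equivalent to
 *
 *	*pte |= EPT_PG_WRITE | EPT_PG_READ;
 *
 * so the entry is never left in the writable-but-not-readable state
 * that would trigger an EPT misconfiguration.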
6904 */ 6905 *pte |= PG_M | PG_A; 6906 } else { 6907 *pte |= PG_A; 6908 } 6909 6910 /* try to promote the mapping */ 6911 if (va < VM_MAXUSER_ADDRESS) 6912 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 6913 else 6914 mpte = NULL; 6915 6916 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 6917 6918 if ((mpte == NULL || mpte->wire_count == NPTEPG) && 6919 pmap_ps_enabled(pmap) && 6920 (m->flags & PG_FICTITIOUS) == 0 && 6921 vm_reserv_level_iffullpop(m) == 0) { 6922 if (!pv_lists_locked) { 6923 pv_lists_locked = TRUE; 6924 if (!rw_try_rlock(&pvh_global_lock)) { 6925 PMAP_UNLOCK(pmap); 6926 rw_rlock(&pvh_global_lock); 6927 goto retry; 6928 } 6929 } 6930 pmap_promote_pde(pmap, pde, va, &lock); 6931#ifdef INVARIANTS 6932 atomic_add_long(&ad_emulation_superpage_promotions, 1); 6933#endif 6934 } 6935#ifdef INVARIANTS 6936 if (ftype == VM_PROT_WRITE) 6937 atomic_add_long(&num_dirty_emulations, 1); 6938 else 6939 atomic_add_long(&num_accessed_emulations, 1); 6940#endif 6941 rv = 0; /* success */ 6942done: 6943 if (lock != NULL) 6944 rw_wunlock(lock); 6945 if (pv_lists_locked) 6946 rw_runlock(&pvh_global_lock); 6947 PMAP_UNLOCK(pmap); 6948 return (rv); 6949} 6950 6951void 6952pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num) 6953{ 6954 pml4_entry_t *pml4; 6955 pdp_entry_t *pdp; 6956 pd_entry_t *pde; 6957 pt_entry_t *pte, PG_V; 6958 int idx; 6959 6960 idx = 0; 6961 PG_V = pmap_valid_bit(pmap); 6962 PMAP_LOCK(pmap); 6963 6964 pml4 = pmap_pml4e(pmap, va); 6965 ptr[idx++] = *pml4; 6966 if ((*pml4 & PG_V) == 0) 6967 goto done; 6968 6969 pdp = pmap_pml4e_to_pdpe(pml4, va); 6970 ptr[idx++] = *pdp; 6971 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) 6972 goto done; 6973 6974 pde = pmap_pdpe_to_pde(pdp, va); 6975 ptr[idx++] = *pde; 6976 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) 6977 goto done; 6978 6979 pte = pmap_pde_to_pte(pde, va); 6980 ptr[idx++] = *pte; 6981 6982done: 6983 PMAP_UNLOCK(pmap); 6984 *num = idx; 6985} 6986 6987#include "opt_ddb.h" 6988#ifdef DDB 6989#include <ddb/ddb.h> 6990 6991DB_SHOW_COMMAND(pte, pmap_print_pte) 6992{ 6993 pmap_t pmap; 6994 pml4_entry_t *pml4; 6995 pdp_entry_t *pdp; 6996 pd_entry_t *pde; 6997 pt_entry_t *pte, PG_V; 6998 vm_offset_t va; 6999 7000 if (have_addr) { 7001 va = (vm_offset_t)addr; 7002 pmap = PCPU_GET(curpmap); /* XXX */ 7003 } else { 7004 db_printf("show pte addr\n"); 7005 return; 7006 } 7007 PG_V = pmap_valid_bit(pmap); 7008 pml4 = pmap_pml4e(pmap, va); 7009 db_printf("VA %#016lx pml4e %#016lx", va, *pml4); 7010 if ((*pml4 & PG_V) == 0) { 7011 db_printf("\n"); 7012 return; 7013 } 7014 pdp = pmap_pml4e_to_pdpe(pml4, va); 7015 db_printf(" pdpe %#016lx", *pdp); 7016 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) { 7017 db_printf("\n"); 7018 return; 7019 } 7020 pde = pmap_pdpe_to_pde(pdp, va); 7021 db_printf(" pde %#016lx", *pde); 7022 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) { 7023 db_printf("\n"); 7024 return; 7025 } 7026 pte = pmap_pde_to_pte(pde, va); 7027 db_printf(" pte %#016lx\n", *pte); 7028} 7029 7030DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap) 7031{ 7032 vm_paddr_t a; 7033 7034 if (have_addr) { 7035 a = (vm_paddr_t)addr; 7036 db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a)); 7037 } else { 7038 db_printf("show phys2dmap addr\n"); 7039 } 7040} 7041#endif 7042