/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h  7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h  7.4 (Berkeley) 5/12/91
 * $FreeBSD: stable/10/sys/amd64/include/pmap.h 263875 2014-03-28 15:38:38Z kib $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
                                /* ---- Intel Nomenclature ---- */
#define X86_PG_V        0x001   /* P    Valid                   */
#define X86_PG_RW       0x002   /* R/W  Read/Write              */
#define X86_PG_U        0x004   /* U/S  User/Supervisor         */
#define X86_PG_NC_PWT   0x008   /* PWT  Write through           */
#define X86_PG_NC_PCD   0x010   /* PCD  Cache disable           */
#define X86_PG_A        0x020   /* A    Accessed                */
#define X86_PG_M        0x040   /* D    Dirty                   */
#define X86_PG_PS       0x080   /* PS   Page size (0=4k,1=2M)   */
#define X86_PG_PTE_PAT  0x080   /* PAT  PAT index               */
#define X86_PG_G        0x100   /* G    Global                  */
#define X86_PG_AVAIL1   0x200   /*    / Available for system    */
#define X86_PG_AVAIL2   0x400   /*   <  programmers use         */
#define X86_PG_AVAIL3   0x800   /*    \                         */
#define X86_PG_PDE_PAT  0x1000  /* PAT  PAT index               */
#define X86_PG_NX       (1ul<<63) /* No-execute */
#define X86_PG_AVAIL(x) (1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define EPT_PG_READ             0x001   /* R    Read      */
#define EPT_PG_WRITE            0x002   /* W    Write     */
#define EPT_PG_EXECUTE          0x004   /* X    Execute   */
#define EPT_PG_IGNORE_PAT       0x040   /* IPAT Ignore PAT */
#define EPT_PG_PS               0x080   /* PS   Page size */
#define EPT_PG_A                0x100   /* A    Accessed  */
#define EPT_PG_M                0x200   /* D    Dirty     */
#define EPT_PG_MEMORY_TYPE(x)   ((x) << 3) /* MT Memory Type */

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define PG_V            X86_PG_V
#define PG_RW           X86_PG_RW
#define PG_U            X86_PG_U
#define PG_NC_PWT       X86_PG_NC_PWT
#define PG_NC_PCD       X86_PG_NC_PCD
#define PG_A            X86_PG_A
#define PG_M            X86_PG_M
#define PG_PS           X86_PG_PS
#define PG_PTE_PAT      X86_PG_PTE_PAT
#define PG_G            X86_PG_G
#define PG_AVAIL1       X86_PG_AVAIL1
#define PG_AVAIL2       X86_PG_AVAIL2
#define PG_AVAIL3       X86_PG_AVAIL3
#define PG_PDE_PAT      X86_PG_PDE_PAT
#define PG_NX           X86_PG_NX
#define PG_PDE_CACHE    X86_PG_PDE_CACHE
#define PG_PTE_CACHE    X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define PG_W            X86_PG_AVAIL3   /* "Wired" pseudoflag */
#define PG_MANAGED      X86_PG_AVAIL2
#define EPT_PG_EMUL_V   X86_PG_AVAIL(52)
#define EPT_PG_EMUL_RW  X86_PG_AVAIL(53)
#define PG_FRAME        (0x000ffffffffff000ul)
#define PG_PS_FRAME     (0x000fffffffe00000ul)
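
/*
 * Example (editorial addition, not part of the original header): composing
 * a leaf PTE from the bits above and recovering the physical frame with
 * PG_FRAME.  A minimal sketch, loosely modeled on what pmap_kenter() does
 * in pmap.c; the helper names are hypothetical.
 */
#if 0
static __inline pt_entry_t
example_kernel_pte(vm_paddr_t pa)
{
	/* Valid, writable, global 4KB kernel mapping of page-aligned 'pa'. */
	return (pa | X86_PG_RW | X86_PG_V | X86_PG_G);
}

static __inline vm_paddr_t
example_pte_to_pa(pt_entry_t pte)
{
	/* Bits 12..51 of a 4KB PTE hold the physical page frame address. */
	return (pte & PG_FRAME);
}
#endif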

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE  (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
            PG_M | PG_A | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define PGEX_P          0x01    /* Protection violation vs. not present */
#define PGEX_W          0x02    /* during a Write cycle */
#define PGEX_U          0x04    /* access from User mode (UPL) */
#define PGEX_RSV        0x08    /* reserved PTE field is non-zero */
#define PGEX_I          0x10    /* during an instruction fetch */

/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1        /* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
        ((unsigned long)-1 << 47) | \
        ((unsigned long)(l4) << PML4SHIFT) | \
        ((unsigned long)(l3) << PDPSHIFT) | \
        ((unsigned long)(l2) << PDRSHIFT) | \
        ((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
        ((unsigned long)(l4) << PML4SHIFT) | \
        ((unsigned long)(l3) << PDPSHIFT) | \
        ((unsigned long)(l2) << PDRSHIFT) | \
        ((unsigned long)(l1) << PAGE_SHIFT))
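
/*
 * Worked example (editorial addition, not part of the original header):
 * vmparam.h defines KERNBASE as KVADDR(KPML4I, KPDPI, 0, 0).  With
 * KPML4I = 511 and KPDPI = 510 (both defined below), that expands to
 *
 *      ((unsigned long)-1 << 47)    = 0xffff800000000000   sign extension
 *      (511ul << 39 [PML4SHIFT])    = 0x0000ff8000000000
 *      (510ul << 30 [PDPSHIFT])     = 0x0000007f80000000
 *      OR of all three              = 0xffffffff80000000
 *
 * i.e. the kernel is linked 2GB below the top of the canonical address
 * space.  Omitting the (-1 << 47) term would yield a non-canonical
 * address, which is why bit 47 must be replicated into bits 48..63.
 */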

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define NKPML4E         4

#define NUPML4E         (NPML4EPG/2)    /* number of userland PML4 pages */
#define NUPDPE          (NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define NUPDE           (NUPDPE*NPDEPG) /* number of userland PD entries */

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define NDMPML4E        8

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define PML4PML4I       (NPML4EPG/2)    /* Index of recursive pml4 mapping */

#define KPML4BASE       (NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define DMPML4I         rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define KPML4I          (NPML4EPG-1)
#define KPDPI           (NPDPEPG-2)     /* kernbase at -2GB */

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH   (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define addr_PTmap      (KVADDR(PML4PML4I, 0, 0, 0))
#define addr_PDmap      (KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define addr_PDPmap     (KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define addr_PML4map    (KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define addr_PML4pml4e  (addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define PTmap           ((pt_entry_t *)(addr_PTmap))
#define PDmap           ((pd_entry_t *)(addr_PDmap))
#define PDPmap          ((pd_entry_t *)(addr_PDPmap))
#define PML4map         ((pd_entry_t *)(addr_PML4map))
#define PML4pml4e       ((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;                /* Initial number of kernel page tables */
extern u_int64_t KPDPphys;      /* physical address of kernel level 3 */
extern u_int64_t KPML4phys;     /* physical address of kernel level 4 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define vtophys(va)     pmap_kextract(((vm_offset_t) (va)))
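
/*
 * Sketch (editorial addition, not part of the original header): how the
 * recursive PML4 slot makes vtopte() a constant-time computation.  This
 * mirrors the pmap.c implementation in spirit; treat it as illustrative
 * rather than authoritative.
 */
#if 0
static __inline pt_entry_t *
example_vtopte(vm_offset_t va)
{
	/* 4 paging levels x 9 index bits each = 36 significant index bits. */
	u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
	    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

	/* PTmap is the 512GB window mapped through the recursive slot. */
	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
#endif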

#define pte_load_store(ptep, pte)       atomic_swap_long(ptep, pte)
#define pte_load_clear(ptep)            atomic_swap_long(ptep, 0)
#define pte_store(ptep, pte) do { \
        *(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define pte_clear(ptep)                 pte_store(ptep, 0)

#define pde_store(pdep, pde)            pte_store(pdep, pde)

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
        TAILQ_HEAD(,pv_entry)   pv_list;
        int                     pv_gen;
        int                     pat_mode;
};

enum pmap_type {
        PT_X86,                 /* regular x86 page tables */
        PT_EPT,                 /* Intel's nested page tables */
        PT_RVI,                 /* AMD's nested page tables */
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
        struct mtx              pm_mtx;
        pml4_entry_t            *pm_pml4;       /* KVA of level 4 page table */
        uint64_t                pm_cr3;
        TAILQ_HEAD(,pv_chunk)   pm_pvchunk;     /* list of mappings in pmap */
        cpuset_t                pm_active;      /* active on cpus */
        cpuset_t                pm_save;        /* Context valid on cpus mask */
        int                     pm_pcid;        /* context id */
        enum pmap_type          pm_type;        /* regular or nested tables */
        struct pmap_statistics  pm_stats;       /* pmap statistics */
        struct vm_radix         pm_root;        /* spare page table pages */
        long                    pm_eptgen;      /* EPT pmap generation id */
        int                     pm_flags;
};

/* flags */
#define PMAP_PDE_SUPERPAGE      (1 << 0)        /* supports 2MB superpages */
#define PMAP_EMULATE_AD_BITS    (1 << 1)        /* needs A/D bits emulation */
#define PMAP_SUPPORTS_EXEC_ONLY (1 << 2)        /* execute only mappings ok */

typedef struct pmap     *pmap_t;

#ifdef _KERNEL
extern struct pmap      kernel_pmap_store;
#define kernel_pmap     (&kernel_pmap_store)

#define PMAP_LOCK(pmap)         mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
                                mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)    mtx_init(&(pmap)->pm_mtx, "pmap", \
                                    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap)       mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)          (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)      mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)

int     pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int     pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif
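
/*
 * Usage sketch (editorial addition, not part of the original header): the
 * canonical pattern for code that reads or modifies a pmap.  The helper
 * function itself is hypothetical.
 */
#if 0
static void
example_read_stats(pmap_t pmap, long *resident)
{
	PMAP_LOCK(pmap);
	*resident = pmap->pm_stats.resident_count;
	PMAP_UNLOCK(pmap);
}
#endif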

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
        vm_offset_t     pv_va;          /* virtual address for mapping */
        TAILQ_ENTRY(pv_entry)   pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define _NPCM   3
#define _NPCPV  168
struct pv_chunk {
        pmap_t                  pc_pmap;
        TAILQ_ENTRY(pv_chunk)   pc_list;
        uint64_t                pc_map[_NPCM];  /* bitmap; 1 = free */
        TAILQ_ENTRY(pv_chunk)   pc_lru;
        struct pv_entry         pc_pventry[_NPCPV];
};

#ifdef _KERNEL

extern caddr_t  CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;

#define pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))

void    pmap_bootstrap(vm_paddr_t *);
int     pmap_change_attr(vm_offset_t, vm_size_t, int);
void    pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void    pmap_init_pat(void);
void    pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void    *pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void    pmap_kremove(vm_offset_t);
void    *pmap_mapbios(vm_paddr_t, vm_size_t);
void    *pmap_mapdev(vm_paddr_t, vm_size_t);
void    *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void    pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void    pmap_unmapdev(vm_offset_t, vm_size_t);
void    pmap_invalidate_page(pmap_t, vm_offset_t);
void    pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void    pmap_invalidate_all(pmap_t);
void    pmap_invalidate_cache(void);
void    pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void    pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void    pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */
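
/*
 * Sizing note (editorial addition, not part of the original header): the
 * pv_chunk constants are chosen so that a chunk fills exactly one 4KB
 * page on LP64:
 *
 *      sizeof(struct pv_entry) = 8 (pv_va) + 16 (pv_next)       =   24 bytes
 *      header = pc_pmap 8 + pc_list 16 + pc_map 24 + pc_lru 16  =   64 bytes
 *      64 + 168 * 24                                            = 4096 bytes
 *
 * Likewise _NPCM = 3 because 168 free bits need three 64-bit bitmap words
 * (64 + 64 + 40); the 24 leftover bits in pc_map[2] are treated as
 * permanently allocated.
 */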