1139825Simp/*- 281895Sjake * Copyright (c) 1991 Regents of the University of California. 380708Sjake * All rights reserved. 481895Sjake * Copyright (c) 1994 John S. Dyson 581895Sjake * All rights reserved. 681895Sjake * Copyright (c) 1994 David Greenman 781895Sjake * All rights reserved. 880708Sjake * 981895Sjake * This code is derived from software contributed to Berkeley by 1081895Sjake * the Systems Programming Group of the University of Utah Computer 1181895Sjake * Science Department and William Jolitz of UUNET Technologies Inc. 1281895Sjake * 1380708Sjake * Redistribution and use in source and binary forms, with or without 1480708Sjake * modification, are permitted provided that the following conditions 1580708Sjake * are met: 1680708Sjake * 1. Redistributions of source code must retain the above copyright 1780708Sjake * notice, this list of conditions and the following disclaimer. 1880708Sjake * 2. Redistributions in binary form must reproduce the above copyright 1980708Sjake * notice, this list of conditions and the following disclaimer in the 2080708Sjake * documentation and/or other materials provided with the distribution. 2181895Sjake * 4. Neither the name of the University nor the names of its contributors 2281895Sjake * may be used to endorse or promote products derived from this software 2381895Sjake * without specific prior written permission. 2480708Sjake * 2581895Sjake * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 2680708Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2780708Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2881895Sjake * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2980708Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 3080708Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 3180708Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3280708Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3380708Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3480708Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3580708Sjake * SUCH DAMAGE. 3680708Sjake * 3781895Sjake * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 3880708Sjake */ 3980708Sjake 40176994Smarius#include <sys/cdefs.h> 41176994Smarius__FBSDID("$FreeBSD$"); 42176994Smarius 4380709Sjake/* 4480709Sjake * Manages physical address maps. 4580709Sjake * 4680709Sjake * Since the information managed by this module is also stored by the 4780709Sjake * logical address mapping module, this module may throw away valid virtual 4880709Sjake * to physical mappings at almost any time. However, invalidations of 4980709Sjake * mappings must be done as requested. 5080709Sjake * 5180709Sjake * In order to cope with hardware architectures which make virtual to 5280709Sjake * physical map invalidates expensive, this module may delay invalidate 5380709Sjake * reduced protection operations until such time as they are actually 5480709Sjake * necessary. This module is given full information as to which processors 5580709Sjake * are currently using which maps, and to when physical maps must be made 5680709Sjake * correct. 5780709Sjake */ 5880709Sjake 59118239Speter#include "opt_kstack_pages.h" 6091165Sjake#include "opt_pmap.h" 6185241Sjake 6280708Sjake#include <sys/param.h> 6388647Sjake#include <sys/kernel.h> 6485258Sjake#include <sys/ktr.h> 6580708Sjake#include <sys/lock.h> 6685241Sjake#include <sys/msgbuf.h> 6780708Sjake#include <sys/mutex.h> 6880709Sjake#include <sys/proc.h> 69236214Salc#include <sys/rwlock.h> 7091783Sjake#include <sys/smp.h> 7191165Sjake#include <sys/sysctl.h> 7280708Sjake#include <sys/systm.h> 7380709Sjake#include <sys/vmmeter.h> 7480708Sjake 7580709Sjake#include <dev/ofw/openfirm.h> 7680709Sjake 77176994Smarius#include <vm/vm.h> 7880708Sjake#include <vm/vm_param.h> 7980708Sjake#include <vm/vm_kern.h> 8080708Sjake#include <vm/vm_page.h> 8180708Sjake#include <vm/vm_map.h> 8280708Sjake#include <vm/vm_object.h> 8380708Sjake#include <vm/vm_extern.h> 8480708Sjake#include <vm/vm_pageout.h> 8580708Sjake#include <vm/vm_pager.h> 86243132Skib#include <vm/vm_phys.h> 8780708Sjake 8888647Sjake#include <machine/cache.h> 8980709Sjake#include <machine/frame.h> 90101653Sjake#include <machine/instr.h> 9188647Sjake#include <machine/md_var.h> 9297445Sjake#include <machine/metadata.h> 93105531Stmm#include <machine/ofw_mem.h> 9491783Sjake#include <machine/smp.h> 9580709Sjake#include <machine/tlb.h> 9680709Sjake#include <machine/tte.h> 9780709Sjake#include <machine/tsb.h> 98182767Smarius#include <machine/ver.h> 9980709Sjake 10080709Sjake/* 101195149Smarius * Virtual address of message buffer 10280709Sjake */ 10380708Sjakestruct msgbuf *msgbufp; 10480708Sjake 10580709Sjake/* 106181701Smarius * Map of physical memory reagions 10780709Sjake */ 108113238Sjakevm_paddr_t phys_avail[128]; 109105531Stmmstatic struct ofw_mem_region mra[128]; 110105531Stmmstruct ofw_mem_region sparc64_memreg[128]; 111105531Stmmint sparc64_nmemreg; 11288647Sjakestatic struct ofw_map translations[128]; 11388647Sjakestatic int translations_size; 11480709Sjake 115108245Sjakestatic vm_offset_t pmap_idle_map; 116108245Sjakestatic vm_offset_t pmap_temp_map_1; 117108245Sjakestatic vm_offset_t pmap_temp_map_2; 118108245Sjake 11980709Sjake/* 120181701Smarius * First and last available kernel virtual addresses 12180709Sjake */ 12280708Sjakevm_offset_t virtual_avail; 12380708Sjakevm_offset_t virtual_end; 12480709Sjakevm_offset_t kernel_vm_end; 12580708Sjake 126101653Sjakevm_offset_t vm_max_kernel_address; 127101653Sjake 12880709Sjake/* 129181701Smarius * Kernel pmap 13088647Sjake */ 13188647Sjakestruct pmap kernel_pmap_store; 13288647Sjake 133242534Sattiliostruct rwlock_padalign tte_list_global_lock; 134236214Salc 135236214Salc/* 13680709Sjake * Allocate physical memory for use in pmap_bootstrap. 13780709Sjake */ 138211049Smariusstatic vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors); 13980709Sjake 140216803Smariusstatic void pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data); 141223800Smariusstatic void pmap_cache_remove(vm_page_t m, vm_offset_t va); 142223800Smariusstatic int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2, 143223800Smarius struct tte *tp, vm_offset_t va); 144270920Skibstatic int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, 145270920Skib vm_offset_t va); 146216803Smarius 147159303Salc/* 148159303Salc * Map the given physical page at the specified virtual address in the 149159303Salc * target pmap with the protection requested. If specified the page 150159303Salc * will be wired down. 151159303Salc * 152159303Salc * The page queues and pmap must be locked. 153159303Salc */ 154270439Skibstatic int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, 155270439Skib vm_prot_t prot, u_int flags, int8_t psind); 156159303Salc 157216803Smariusextern int tl1_dmmu_miss_direct_patch_tsb_phys_1[]; 158216803Smariusextern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[]; 159216803Smariusextern int tl1_dmmu_miss_patch_asi_1[]; 160216803Smariusextern int tl1_dmmu_miss_patch_quad_ldd_1[]; 161216803Smariusextern int tl1_dmmu_miss_patch_tsb_1[]; 162216803Smariusextern int tl1_dmmu_miss_patch_tsb_2[]; 163216803Smariusextern int tl1_dmmu_miss_patch_tsb_mask_1[]; 164216803Smariusextern int tl1_dmmu_miss_patch_tsb_mask_2[]; 165216803Smariusextern int tl1_dmmu_prot_patch_asi_1[]; 166216803Smariusextern int tl1_dmmu_prot_patch_quad_ldd_1[]; 167216803Smariusextern int tl1_dmmu_prot_patch_tsb_1[]; 168216803Smariusextern int tl1_dmmu_prot_patch_tsb_2[]; 169216803Smariusextern int tl1_dmmu_prot_patch_tsb_mask_1[]; 170216803Smariusextern int tl1_dmmu_prot_patch_tsb_mask_2[]; 171216803Smariusextern int tl1_immu_miss_patch_asi_1[]; 172216803Smariusextern int tl1_immu_miss_patch_quad_ldd_1[]; 173216803Smariusextern int tl1_immu_miss_patch_tsb_1[]; 174216803Smariusextern int tl1_immu_miss_patch_tsb_2[]; 175216803Smariusextern int tl1_immu_miss_patch_tsb_mask_1[]; 176216803Smariusextern int tl1_immu_miss_patch_tsb_mask_2[]; 177101653Sjake 17880709Sjake/* 17988826Stmm * If user pmap is processed with pmap_remove and with pmap_remove and the 18088826Stmm * resident count drops to 0, there are no more pages to remove, so we 18188826Stmm * need not continue. 18288826Stmm */ 18388826Stmm#define PMAP_REMOVE_DONE(pm) \ 18488826Stmm ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0) 18588826Stmm 18688826Stmm/* 18788826Stmm * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove() 18888826Stmm * and pmap_protect() instead of trying each virtual address. 18988826Stmm */ 19088826Stmm#define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE) 19188826Stmm 192108700SjakeSYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, ""); 19391165Sjake 194108700SjakePMAP_STATS_VAR(pmap_nenter); 195108700SjakePMAP_STATS_VAR(pmap_nenter_update); 196108700SjakePMAP_STATS_VAR(pmap_nenter_replace); 197108700SjakePMAP_STATS_VAR(pmap_nenter_new); 198108700SjakePMAP_STATS_VAR(pmap_nkenter); 199112879SjakePMAP_STATS_VAR(pmap_nkenter_oc); 200112879SjakePMAP_STATS_VAR(pmap_nkenter_stupid); 201108700SjakePMAP_STATS_VAR(pmap_nkremove); 202108700SjakePMAP_STATS_VAR(pmap_nqenter); 203108700SjakePMAP_STATS_VAR(pmap_nqremove); 204108700SjakePMAP_STATS_VAR(pmap_ncache_enter); 205108700SjakePMAP_STATS_VAR(pmap_ncache_enter_c); 206108700SjakePMAP_STATS_VAR(pmap_ncache_enter_oc); 207108700SjakePMAP_STATS_VAR(pmap_ncache_enter_cc); 208108700SjakePMAP_STATS_VAR(pmap_ncache_enter_coc); 209108700SjakePMAP_STATS_VAR(pmap_ncache_enter_nc); 210108700SjakePMAP_STATS_VAR(pmap_ncache_enter_cnc); 211108700SjakePMAP_STATS_VAR(pmap_ncache_remove); 212108700SjakePMAP_STATS_VAR(pmap_ncache_remove_c); 213108700SjakePMAP_STATS_VAR(pmap_ncache_remove_oc); 214108700SjakePMAP_STATS_VAR(pmap_ncache_remove_cc); 215108700SjakePMAP_STATS_VAR(pmap_ncache_remove_coc); 216108700SjakePMAP_STATS_VAR(pmap_ncache_remove_nc); 217108700SjakePMAP_STATS_VAR(pmap_nzero_page); 218108700SjakePMAP_STATS_VAR(pmap_nzero_page_c); 219108700SjakePMAP_STATS_VAR(pmap_nzero_page_oc); 220108700SjakePMAP_STATS_VAR(pmap_nzero_page_nc); 221108700SjakePMAP_STATS_VAR(pmap_nzero_page_area); 222108700SjakePMAP_STATS_VAR(pmap_nzero_page_area_c); 223108700SjakePMAP_STATS_VAR(pmap_nzero_page_area_oc); 224108700SjakePMAP_STATS_VAR(pmap_nzero_page_area_nc); 225108700SjakePMAP_STATS_VAR(pmap_nzero_page_idle); 226108700SjakePMAP_STATS_VAR(pmap_nzero_page_idle_c); 227108700SjakePMAP_STATS_VAR(pmap_nzero_page_idle_oc); 228108700SjakePMAP_STATS_VAR(pmap_nzero_page_idle_nc); 229108700SjakePMAP_STATS_VAR(pmap_ncopy_page); 230108700SjakePMAP_STATS_VAR(pmap_ncopy_page_c); 231108700SjakePMAP_STATS_VAR(pmap_ncopy_page_oc); 232108700SjakePMAP_STATS_VAR(pmap_ncopy_page_nc); 233108700SjakePMAP_STATS_VAR(pmap_ncopy_page_dc); 234108700SjakePMAP_STATS_VAR(pmap_ncopy_page_doc); 235108700SjakePMAP_STATS_VAR(pmap_ncopy_page_sc); 236108700SjakePMAP_STATS_VAR(pmap_ncopy_page_soc); 23791165Sjake 238108700SjakePMAP_STATS_VAR(pmap_nnew_thread); 239108700SjakePMAP_STATS_VAR(pmap_nnew_thread_oc); 24091165Sjake 241223719Smariusstatic inline u_long dtlb_get_data(u_int tlb, u_int slot); 242205399Smarius 24388826Stmm/* 244181701Smarius * Quick sort callout for comparing memory regions 24580709Sjake */ 24680709Sjakestatic int mr_cmp(const void *a, const void *b); 24788647Sjakestatic int om_cmp(const void *a, const void *b); 248203839Smarius 24980709Sjakestatic int 25080709Sjakemr_cmp(const void *a, const void *b) 25180709Sjake{ 252105531Stmm const struct ofw_mem_region *mra; 253105531Stmm const struct ofw_mem_region *mrb; 25489036Sjake 25589036Sjake mra = a; 25689036Sjake mrb = b; 25789036Sjake if (mra->mr_start < mrb->mr_start) 25889036Sjake return (-1); 25989036Sjake else if (mra->mr_start > mrb->mr_start) 26089036Sjake return (1); 26189036Sjake else 26289036Sjake return (0); 26380709Sjake} 264203839Smarius 26588647Sjakestatic int 26688647Sjakeom_cmp(const void *a, const void *b) 26788647Sjake{ 26889036Sjake const struct ofw_map *oma; 26989036Sjake const struct ofw_map *omb; 27089036Sjake 27189036Sjake oma = a; 27289036Sjake omb = b; 27389036Sjake if (oma->om_start < omb->om_start) 27489036Sjake return (-1); 27589036Sjake else if (oma->om_start > omb->om_start) 27689036Sjake return (1); 27789036Sjake else 27889036Sjake return (0); 27988647Sjake} 28080709Sjake 281205399Smariusstatic inline u_long 282223719Smariusdtlb_get_data(u_int tlb, u_int slot) 283205399Smarius{ 284223719Smarius u_long data; 285223719Smarius register_t s; 286205399Smarius 287223719Smarius slot = TLB_DAR_SLOT(tlb, slot); 288205399Smarius /* 289223719Smarius * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to 290223719Smarius * work around errata of USIII and beyond. 291205399Smarius */ 292223719Smarius s = intr_disable(); 293223719Smarius (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG); 294223719Smarius data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG); 295223719Smarius intr_restore(s); 296223719Smarius return (data); 297205399Smarius} 298205399Smarius 29980709Sjake/* 30080709Sjake * Bootstrap the system enough to run with virtual memory. 30180709Sjake */ 30280708Sjakevoid 303204152Smariuspmap_bootstrap(u_int cpu_impl) 30480708Sjake{ 30580709Sjake struct pmap *pm; 30688647Sjake struct tte *tp; 30784183Sjake vm_offset_t off; 30880709Sjake vm_offset_t va; 309113238Sjake vm_paddr_t pa; 31090625Stmm vm_size_t physsz; 311101653Sjake vm_size_t virtsz; 312205399Smarius u_long data; 313216803Smarius u_long vpn; 314181701Smarius phandle_t pmem; 315181701Smarius phandle_t vmem; 316205399Smarius u_int dtlb_slots_avail; 31780709Sjake int i; 31880709Sjake int j; 319205399Smarius int sz; 320216803Smarius uint32_t asi; 321211049Smarius uint32_t colors; 322216803Smarius uint32_t ldd; 32380709Sjake 324216803Smarius /* 325216803Smarius * Set the kernel context. 326216803Smarius */ 327216803Smarius pmap_set_kctx(); 328216803Smarius 329211049Smarius colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS; 330211049Smarius 33180709Sjake /* 332181701Smarius * Find out what physical memory is available from the PROM and 33385241Sjake * initialize the phys_avail array. This must be done before 33485241Sjake * pmap_bootstrap_alloc is called. 33580709Sjake */ 33680709Sjake if ((pmem = OF_finddevice("/memory")) == -1) 337230634Smarius OF_panic("%s: finddevice /memory", __func__); 33880709Sjake if ((sz = OF_getproplen(pmem, "available")) == -1) 339230634Smarius OF_panic("%s: getproplen /memory/available", __func__); 34080709Sjake if (sizeof(phys_avail) < sz) 341230634Smarius OF_panic("%s: phys_avail too small", __func__); 34284183Sjake if (sizeof(mra) < sz) 343230634Smarius OF_panic("%s: mra too small", __func__); 34480709Sjake bzero(mra, sz); 34580709Sjake if (OF_getprop(pmem, "available", mra, sz) == -1) 346230634Smarius OF_panic("%s: getprop /memory/available", __func__); 34780709Sjake sz /= sizeof(*mra); 348293853Smarius#ifdef DIAGNOSTIC 349293853Smarius OF_printf("pmap_bootstrap: physical memory\n"); 350293853Smarius#endif 35188647Sjake qsort(mra, sz, sizeof (*mra), mr_cmp); 35290625Stmm physsz = 0; 353112879Sjake getenv_quad("hw.physmem", &physmem); 354162544Salc physmem = btoc(physmem); 35580709Sjake for (i = 0, j = 0; i < sz; i++, j += 2) { 356293853Smarius#ifdef DIAGNOSTIC 357293853Smarius OF_printf("start=%#lx size=%#lx\n", mra[i].mr_start, 35884183Sjake mra[i].mr_size); 359293853Smarius#endif 360112879Sjake if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) { 361112879Sjake if (btoc(physsz) < physmem) { 362112879Sjake phys_avail[j] = mra[i].mr_start; 363112879Sjake phys_avail[j + 1] = mra[i].mr_start + 364112879Sjake (ctob(physmem) - physsz); 365112879Sjake physsz = ctob(physmem); 366112879Sjake } 367112879Sjake break; 368112879Sjake } 36980709Sjake phys_avail[j] = mra[i].mr_start; 37080709Sjake phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size; 37190625Stmm physsz += mra[i].mr_size; 37280709Sjake } 37390625Stmm physmem = btoc(physsz); 37480709Sjake 375108193Sjake /* 376108193Sjake * Calculate the size of kernel virtual memory, and the size and mask 377186682Smarius * for the kernel TSB based on the phsyical memory size but limited 378216803Smarius * by the amount of dTLB slots available for locked entries if we have 379216803Smarius * to lock the TSB in the TLB (given that for spitfire-class CPUs all 380216803Smarius * of the dt64 slots can hold locked entries but there is no large 381216803Smarius * dTLB for unlocked ones, we don't use more than half of it for the 382216803Smarius * TSB). 383216803Smarius * Note that for reasons unknown OpenSolaris doesn't take advantage of 384216803Smarius * ASI_ATOMIC_QUAD_LDD_PHYS on UltraSPARC-III. However, given that no 385216803Smarius * public documentation is available for these, the latter just might 386216803Smarius * not support it, yet. 387108193Sjake */ 388218457Smarius if (cpu_impl == CPU_IMPL_SPARC64V || 389223377Smarius cpu_impl >= CPU_IMPL_ULTRASPARCIIIp) { 390216803Smarius tsb_kernel_ldd_phys = 1; 391223377Smarius virtsz = roundup(5 / 3 * physsz, PAGE_SIZE_4M << 392223377Smarius (PAGE_SHIFT - TTE_SHIFT)); 393223377Smarius } else { 394216803Smarius dtlb_slots_avail = 0; 395216803Smarius for (i = 0; i < dtlb_slots; i++) { 396223719Smarius data = dtlb_get_data(cpu_impl == 397223719Smarius CPU_IMPL_ULTRASPARCIII ? TLB_DAR_T16 : 398223719Smarius TLB_DAR_T32, i); 399216803Smarius if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) 400216803Smarius dtlb_slots_avail++; 401216803Smarius } 402205399Smarius#ifdef SMP 403216803Smarius dtlb_slots_avail -= PCPU_PAGES; 404205399Smarius#endif 405216803Smarius if (cpu_impl >= CPU_IMPL_ULTRASPARCI && 406216803Smarius cpu_impl < CPU_IMPL_ULTRASPARCIII) 407216803Smarius dtlb_slots_avail /= 2; 408223377Smarius virtsz = roundup(physsz, PAGE_SIZE_4M << 409223377Smarius (PAGE_SHIFT - TTE_SHIFT)); 410216803Smarius virtsz = MIN(virtsz, (dtlb_slots_avail * PAGE_SIZE_4M) << 411216803Smarius (PAGE_SHIFT - TTE_SHIFT)); 412216803Smarius } 413101653Sjake vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz; 414101653Sjake tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT); 415101653Sjake tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1; 416101653Sjake 41780709Sjake /* 418216803Smarius * Allocate the kernel TSB and lock it in the TLB if necessary. 419105531Stmm */ 420211049Smarius pa = pmap_bootstrap_alloc(tsb_kernel_size, colors); 42180709Sjake if (pa & PAGE_MASK_4M) 422230634Smarius OF_panic("%s: TSB unaligned", __func__); 42380709Sjake tsb_kernel_phys = pa; 424216803Smarius if (tsb_kernel_ldd_phys == 0) { 425216803Smarius tsb_kernel = 426216803Smarius (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size); 427216803Smarius pmap_map_tsb(); 428216803Smarius bzero(tsb_kernel, tsb_kernel_size); 429216803Smarius } else { 430216803Smarius tsb_kernel = 431216803Smarius (struct tte *)TLB_PHYS_TO_DIRECT(tsb_kernel_phys); 432216803Smarius aszero(ASI_PHYS_USE_EC, tsb_kernel_phys, tsb_kernel_size); 433216803Smarius } 434101653Sjake 435101653Sjake /* 436194784Sjeff * Allocate and map the dynamic per-CPU area for the BSP. 437194784Sjeff */ 438211049Smarius pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors); 439195149Smarius dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa); 440194784Sjeff 441194784Sjeff /* 442108301Sjake * Allocate and map the message buffer. 443108193Sjake */ 444217688Spluknet pa = pmap_bootstrap_alloc(msgbufsize, colors); 445195149Smarius msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa); 446108193Sjake 447108193Sjake /* 448216803Smarius * Patch the TSB addresses and mask as well as the ASIs used to load 449216803Smarius * it into the trap table. 450101653Sjake */ 451101653Sjake 452216803Smarius#define LDDA_R_I_R(rd, imm_asi, rs1, rs2) \ 453216803Smarius (EIF_OP(IOP_LDST) | EIF_F3_RD(rd) | EIF_F3_OP3(INS3_LDDA) | \ 454216803Smarius EIF_F3_RS1(rs1) | EIF_F3_I(0) | EIF_F3_IMM_ASI(imm_asi) | \ 455216803Smarius EIF_F3_RS2(rs2)) 456216803Smarius#define OR_R_I_R(rd, imm13, rs1) \ 457216803Smarius (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \ 458216803Smarius EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13)) 459216803Smarius#define SETHI(rd, imm22) \ 460216803Smarius (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \ 461102040Sjake EIF_IMM((imm22) >> 10, 22)) 462216803Smarius#define WR_R_I(rd, imm13, rs1) \ 463216803Smarius (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_WR) | \ 464102040Sjake EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13)) 465101653Sjake 466216803Smarius#define PATCH_ASI(addr, asi) do { \ 467216803Smarius if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0, \ 468216803Smarius IF_F3_RS1(addr[0]))) \ 469230634Smarius OF_panic("%s: patched instructions have changed", \ 470216803Smarius __func__); \ 471216803Smarius addr[0] |= EIF_IMM((asi), 13); \ 472216803Smarius flush(addr); \ 473102040Sjake} while (0) 474101653Sjake 475216803Smarius#define PATCH_LDD(addr, asi) do { \ 476216803Smarius if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0, \ 477216803Smarius IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0]))) \ 478230634Smarius OF_panic("%s: patched instructions have changed", \ 479216803Smarius __func__); \ 480216803Smarius addr[0] |= EIF_F3_IMM_ASI(asi); \ 481216803Smarius flush(addr); \ 482216803Smarius} while (0) 483176994Smarius 484216803Smarius#define PATCH_TSB(addr, val) do { \ 485216803Smarius if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \ 486216803Smarius addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \ 487217514Smarius IF_F3_RS1(addr[1])) || \ 488217514Smarius addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0)) \ 489230634Smarius OF_panic("%s: patched instructions have changed", \ 490217514Smarius __func__); \ 491217514Smarius addr[0] |= EIF_IMM((val) >> 42, 22); \ 492217514Smarius addr[1] |= EIF_IMM((val) >> 32, 10); \ 493217514Smarius addr[3] |= EIF_IMM((val) >> 10, 22); \ 494217514Smarius flush(addr); \ 495217514Smarius flush(addr + 1); \ 496217514Smarius flush(addr + 3); \ 497217514Smarius} while (0) 498217514Smarius 499217514Smarius#define PATCH_TSB_MASK(addr, val) do { \ 500217514Smarius if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \ 501217514Smarius addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \ 502216803Smarius IF_F3_RS1(addr[1]))) \ 503230634Smarius OF_panic("%s: patched instructions have changed", \ 504216803Smarius __func__); \ 505216803Smarius addr[0] |= EIF_IMM((val) >> 10, 22); \ 506216803Smarius addr[1] |= EIF_IMM((val), 10); \ 507216803Smarius flush(addr); \ 508216803Smarius flush(addr + 1); \ 509216803Smarius} while (0) 510216803Smarius 511216803Smarius if (tsb_kernel_ldd_phys == 0) { 512216803Smarius asi = ASI_N; 513216803Smarius ldd = ASI_NUCLEUS_QUAD_LDD; 514216803Smarius off = (vm_offset_t)tsb_kernel; 515216803Smarius } else { 516216803Smarius asi = ASI_PHYS_USE_EC; 517216803Smarius ldd = ASI_ATOMIC_QUAD_LDD_PHYS; 518216803Smarius off = (vm_offset_t)tsb_kernel_phys; 519216803Smarius } 520216803Smarius PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_1, tsb_kernel_phys); 521216803Smarius PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_end_1, 522216803Smarius tsb_kernel_phys + tsb_kernel_size - 1); 523216803Smarius PATCH_ASI(tl1_dmmu_miss_patch_asi_1, asi); 524216803Smarius PATCH_LDD(tl1_dmmu_miss_patch_quad_ldd_1, ldd); 525216803Smarius PATCH_TSB(tl1_dmmu_miss_patch_tsb_1, off); 526216803Smarius PATCH_TSB(tl1_dmmu_miss_patch_tsb_2, off); 527217514Smarius PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_1, tsb_kernel_mask); 528217514Smarius PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_2, tsb_kernel_mask); 529216803Smarius PATCH_ASI(tl1_dmmu_prot_patch_asi_1, asi); 530216803Smarius PATCH_LDD(tl1_dmmu_prot_patch_quad_ldd_1, ldd); 531216803Smarius PATCH_TSB(tl1_dmmu_prot_patch_tsb_1, off); 532216803Smarius PATCH_TSB(tl1_dmmu_prot_patch_tsb_2, off); 533217514Smarius PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_1, tsb_kernel_mask); 534217514Smarius PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_2, tsb_kernel_mask); 535216803Smarius PATCH_ASI(tl1_immu_miss_patch_asi_1, asi); 536216803Smarius PATCH_LDD(tl1_immu_miss_patch_quad_ldd_1, ldd); 537216803Smarius PATCH_TSB(tl1_immu_miss_patch_tsb_1, off); 538216803Smarius PATCH_TSB(tl1_immu_miss_patch_tsb_2, off); 539217514Smarius PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_1, tsb_kernel_mask); 540217514Smarius PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_2, tsb_kernel_mask); 541216803Smarius 542101653Sjake /* 54393687Stmm * Enter fake 8k pages for the 4MB kernel pages, so that 54493687Stmm * pmap_kextract() will work for them. 54593687Stmm */ 54693687Stmm for (i = 0; i < kernel_tlb_slots; i++) { 54797445Sjake pa = kernel_tlbs[i].te_pa; 54897445Sjake va = kernel_tlbs[i].te_va; 54995232Sjake for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) { 55095232Sjake tp = tsb_kvtotte(va + off); 551216803Smarius vpn = TV_VPN(va + off, TS_8K); 552216803Smarius data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF | 553216803Smarius TD_SW | TD_CP | TD_CV | TD_P | TD_W; 554216803Smarius pmap_bootstrap_set_tte(tp, vpn, data); 55595232Sjake } 55693687Stmm } 55793687Stmm 55893687Stmm /* 559195149Smarius * Set the start and end of KVA. The kernel is loaded starting 560195149Smarius * at the first available 4MB super page, so we advance to the 561195149Smarius * end of the last one used for it. 562108193Sjake */ 563195149Smarius virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M; 564108193Sjake virtual_end = vm_max_kernel_address; 565108193Sjake kernel_vm_end = vm_max_kernel_address; 566108193Sjake 567108193Sjake /* 568108245Sjake * Allocate kva space for temporary mappings. 569108245Sjake */ 570108245Sjake pmap_idle_map = virtual_avail; 571211049Smarius virtual_avail += PAGE_SIZE * colors; 572108245Sjake pmap_temp_map_1 = virtual_avail; 573211049Smarius virtual_avail += PAGE_SIZE * colors; 574108245Sjake pmap_temp_map_2 = virtual_avail; 575211049Smarius virtual_avail += PAGE_SIZE * colors; 576108245Sjake 577108245Sjake /* 578181701Smarius * Allocate a kernel stack with guard page for thread0 and map it 579181701Smarius * into the kernel TSB. We must ensure that the virtual address is 580211049Smarius * colored properly for corresponding CPUs, since we're allocating 581211049Smarius * from phys_avail so the memory won't have an associated vm_page_t. 58285241Sjake */ 583211049Smarius pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors); 58489038Sjake kstack0_phys = pa; 585211049Smarius virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE; 586112330Sjake kstack0 = virtual_avail; 587211049Smarius virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE; 588211049Smarius if (dcache_color_ignore == 0) 589211049Smarius KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys), 590211049Smarius ("pmap_bootstrap: kstack0 miscolored")); 59189038Sjake for (i = 0; i < KSTACK_PAGES; i++) { 59289038Sjake pa = kstack0_phys + i * PAGE_SIZE; 59389038Sjake va = kstack0 + i * PAGE_SIZE; 59495232Sjake tp = tsb_kvtotte(va); 595216803Smarius vpn = TV_VPN(va, TS_8K); 596216803Smarius data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | 597216803Smarius TD_CV | TD_P | TD_W; 598216803Smarius pmap_bootstrap_set_tte(tp, vpn, data); 59989038Sjake } 60085241Sjake 60185241Sjake /* 602127875Salc * Calculate the last available physical address. 60385241Sjake */ 604108193Sjake for (i = 0; phys_avail[i + 2] != 0; i += 2) 605108193Sjake ; 606128103Salc Maxmem = sparc64_btop(phys_avail[i + 1]); 60785241Sjake 60885241Sjake /* 609181701Smarius * Add the PROM mappings to the kernel TSB. 61084183Sjake */ 61184183Sjake if ((vmem = OF_finddevice("/virtual-memory")) == -1) 612230634Smarius OF_panic("%s: finddevice /virtual-memory", __func__); 61384183Sjake if ((sz = OF_getproplen(vmem, "translations")) == -1) 614230634Smarius OF_panic("%s: getproplen translations", __func__); 61588647Sjake if (sizeof(translations) < sz) 616230634Smarius OF_panic("%s: translations too small", __func__); 61788647Sjake bzero(translations, sz); 61888647Sjake if (OF_getprop(vmem, "translations", translations, sz) == -1) 619230634Smarius OF_panic("%s: getprop /virtual-memory/translations", 620230634Smarius __func__); 62188647Sjake sz /= sizeof(*translations); 62288647Sjake translations_size = sz; 623293853Smarius#ifdef DIAGNOSTIC 624293853Smarius OF_printf("pmap_bootstrap: translations\n"); 625293853Smarius#endif 62688647Sjake qsort(translations, sz, sizeof (*translations), om_cmp); 62784183Sjake for (i = 0; i < sz; i++) { 628293853Smarius#ifdef DIAGNOSTIC 629293853Smarius OF_printf("translation: start=%#lx size=%#lx tte=%#lx\n", 63088647Sjake translations[i].om_start, translations[i].om_size, 63197027Sjake translations[i].om_tte); 632293853Smarius#endif 633182767Smarius if ((translations[i].om_tte & TD_V) == 0) 634182767Smarius continue; 63598350Sjake if (translations[i].om_start < VM_MIN_PROM_ADDRESS || 63698350Sjake translations[i].om_start > VM_MAX_PROM_ADDRESS) 63784183Sjake continue; 63888647Sjake for (off = 0; off < translations[i].om_size; 63988647Sjake off += PAGE_SIZE) { 64088647Sjake va = translations[i].om_start + off; 64188647Sjake tp = tsb_kvtotte(va); 642216803Smarius vpn = TV_VPN(va, TS_8K); 643216803Smarius data = ((translations[i].om_tte & 644182767Smarius ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) | 645207537Smarius (cpu_impl >= CPU_IMPL_ULTRASPARCI && 646207537Smarius cpu_impl < CPU_IMPL_ULTRASPARCIII ? 647182767Smarius (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) : 648182767Smarius (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) | 649182767Smarius (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) + 650105346Stmm off; 651216803Smarius pmap_bootstrap_set_tte(tp, vpn, data); 65284183Sjake } 65384183Sjake } 65484183Sjake 65584183Sjake /* 656181701Smarius * Get the available physical memory ranges from /memory/reg. These 657181701Smarius * are only used for kernel dumps, but it may not be wise to do PROM 658108193Sjake * calls in that situation. 65980709Sjake */ 660108193Sjake if ((sz = OF_getproplen(pmem, "reg")) == -1) 661230634Smarius OF_panic("%s: getproplen /memory/reg", __func__); 662108193Sjake if (sizeof(sparc64_memreg) < sz) 663230634Smarius OF_panic("%s: sparc64_memreg too small", __func__); 664108193Sjake if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1) 665230634Smarius OF_panic("%s: getprop /memory/reg", __func__); 666108193Sjake sparc64_nmemreg = sz / sizeof(*sparc64_memreg); 66780709Sjake 66880709Sjake /* 66985241Sjake * Initialize the kernel pmap (which is statically allocated). 67080709Sjake */ 67188647Sjake pm = kernel_pmap; 672225675Sattilio PMAP_LOCK_INIT(pm); 67391288Sjake for (i = 0; i < MAXCPU; i++) 67491288Sjake pm->pm_context[i] = TLB_CTX_KERNEL; 675222813Sattilio CPU_FILL(&pm->pm_active); 67695232Sjake 677239079Smarius /* 678237623Salc * Initialize the global tte list lock, which is more commonly 679237623Salc * known as the pmap pv global lock. 680236214Salc */ 681237623Salc rw_init(&tte_list_global_lock, "pmap pv global"); 682236214Salc 683176994Smarius /* 684176994Smarius * Flush all non-locked TLB entries possibly left over by the 685176994Smarius * firmware. 686176994Smarius */ 687176994Smarius tlb_flush_nonlocked(); 68889038Sjake} 68980709Sjake 690216803Smarius/* 691216803Smarius * Map the 4MB kernel TSB pages. 692216803Smarius */ 69389038Sjakevoid 69489038Sjakepmap_map_tsb(void) 69589038Sjake{ 69689038Sjake vm_offset_t va; 697113238Sjake vm_paddr_t pa; 69891783Sjake u_long data; 69989038Sjake int i; 70089038Sjake 701101653Sjake for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) { 702101653Sjake va = (vm_offset_t)tsb_kernel + i; 703101653Sjake pa = tsb_kernel_phys + i; 70491783Sjake data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV | 70591783Sjake TD_P | TD_W; 70691783Sjake stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | 70791783Sjake TLB_TAR_CTX(TLB_CTX_KERNEL)); 70898350Sjake stxa_sync(0, ASI_DTLB_DATA_IN_REG, data); 70989038Sjake } 710216803Smarius} 71189038Sjake 712216803Smarius/* 713216803Smarius * Set the secondary context to be the kernel context (needed for FP block 714216803Smarius * operations in the kernel). 715216803Smarius */ 716216803Smariusvoid 717216803Smariuspmap_set_kctx(void) 718216803Smarius{ 719216803Smarius 720182878Smarius stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) & 721205258Smarius TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL); 722182877Smarius flush(KERNBASE); 72380708Sjake} 72480708Sjake 72580709Sjake/* 72680709Sjake * Allocate a physical page of memory directly from the phys_avail map. 72780709Sjake * Can only be called from pmap_bootstrap before avail start and end are 72880709Sjake * calculated. 72980709Sjake */ 730113238Sjakestatic vm_paddr_t 731211049Smariuspmap_bootstrap_alloc(vm_size_t size, uint32_t colors) 73280709Sjake{ 733113238Sjake vm_paddr_t pa; 73480709Sjake int i; 73580709Sjake 736211049Smarius size = roundup(size, PAGE_SIZE * colors); 73781384Sjake for (i = 0; phys_avail[i + 1] != 0; i += 2) { 73880709Sjake if (phys_avail[i + 1] - phys_avail[i] < size) 73980709Sjake continue; 74080709Sjake pa = phys_avail[i]; 74180709Sjake phys_avail[i] += size; 74280709Sjake return (pa); 74380709Sjake } 744230634Smarius OF_panic("%s: no suitable region found", __func__); 74580709Sjake} 74680709Sjake 74780709Sjake/* 748216803Smarius * Set a TTE. This function is intended as a helper when tsb_kernel is 749216803Smarius * direct-mapped but we haven't taken over the trap table, yet, as it's the 750216803Smarius * case when we are taking advantage of ASI_ATOMIC_QUAD_LDD_PHYS to access 751216803Smarius * the kernel TSB. 752216803Smarius */ 753216803Smariusvoid 754216803Smariuspmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data) 755216803Smarius{ 756216803Smarius 757216803Smarius if (tsb_kernel_ldd_phys == 0) { 758216803Smarius tp->tte_vpn = vpn; 759216803Smarius tp->tte_data = data; 760216803Smarius } else { 761216803Smarius stxa((vm_paddr_t)tp + offsetof(struct tte, tte_vpn), 762216803Smarius ASI_PHYS_USE_EC, vpn); 763216803Smarius stxa((vm_paddr_t)tp + offsetof(struct tte, tte_data), 764216803Smarius ASI_PHYS_USE_EC, data); 765216803Smarius } 766216803Smarius} 767216803Smarius 768216803Smarius/* 769147217Salc * Initialize a vm_page's machine-dependent fields. 770147217Salc */ 771147217Salcvoid 772147217Salcpmap_page_init(vm_page_t m) 773147217Salc{ 774147217Salc 775147217Salc TAILQ_INIT(&m->md.tte_list); 776147217Salc m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m)); 777147217Salc m->md.pmap = NULL; 778147217Salc} 779147217Salc 780147217Salc/* 78188647Sjake * Initialize the pmap module. 78280709Sjake */ 78388647Sjakevoid 784127869Salcpmap_init(void) 78580709Sjake{ 78688647Sjake vm_offset_t addr; 78788647Sjake vm_size_t size; 78888647Sjake int result; 78988647Sjake int i; 79080709Sjake 79188647Sjake for (i = 0; i < translations_size; i++) { 79288647Sjake addr = translations[i].om_start; 79388647Sjake size = translations[i].om_size; 794182767Smarius if ((translations[i].om_tte & TD_V) == 0) 795182767Smarius continue; 796101958Sjake if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS) 79788647Sjake continue; 798255426Sjhb result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0, 799194858Skib VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 80088647Sjake if (result != KERN_SUCCESS || addr != translations[i].om_start) 80188647Sjake panic("pmap_init: vm_map_find"); 80288647Sjake } 80380709Sjake} 80480709Sjake 80580709Sjake/* 80688647Sjake * Extract the physical page address associated with the given 80788647Sjake * map/virtual_address pair. 80880709Sjake */ 809113238Sjakevm_paddr_t 81088647Sjakepmap_extract(pmap_t pm, vm_offset_t va) 81180708Sjake{ 81288647Sjake struct tte *tp; 813133451Salc vm_paddr_t pa; 81480709Sjake 81588647Sjake if (pm == kernel_pmap) 81688647Sjake return (pmap_kextract(va)); 817133451Salc PMAP_LOCK(pm); 81888647Sjake tp = tsb_tte_lookup(pm, va); 81988647Sjake if (tp == NULL) 820133451Salc pa = 0; 82197027Sjake else 822133451Salc pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)); 823133451Salc PMAP_UNLOCK(pm); 824133451Salc return (pa); 82580708Sjake} 82680708Sjake 82780709Sjake/* 828119869Salc * Atomically extract and hold the physical page with the given 829119999Salc * pmap and virtual address pair if that mapping permits the given 830119999Salc * protection. 831119869Salc */ 832119869Salcvm_page_t 833133451Salcpmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot) 834119869Salc{ 835133451Salc struct tte *tp; 836119869Salc vm_page_t m; 837207410Skmacy vm_paddr_t pa; 838119869Salc 839119869Salc m = NULL; 840207410Skmacy pa = 0; 841207410Skmacy PMAP_LOCK(pm); 842207410Skmacyretry: 843133451Salc if (pm == kernel_pmap) { 844133451Salc if (va >= VM_MIN_DIRECT_ADDRESS) { 845133451Salc tp = NULL; 846133451Salc m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va)); 847207537Smarius (void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va), 848207537Smarius &pa); 849133451Salc vm_page_hold(m); 850133451Salc } else { 851133451Salc tp = tsb_kvtotte(va); 852133451Salc if ((tp->tte_data & TD_V) == 0) 853133451Salc tp = NULL; 854133451Salc } 855207537Smarius } else 856133451Salc tp = tsb_tte_lookup(pm, va); 857133451Salc if (tp != NULL && ((tp->tte_data & TD_SW) || 858133451Salc (prot & VM_PROT_WRITE) == 0)) { 859207410Skmacy if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa)) 860207410Skmacy goto retry; 861133451Salc m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 862119869Salc vm_page_hold(m); 863119869Salc } 864207410Skmacy PA_UNLOCK_COND(pa); 865207410Skmacy PMAP_UNLOCK(pm); 866119869Salc return (m); 867119869Salc} 868119869Salc 869119869Salc/* 87088647Sjake * Extract the physical page address associated with the given kernel virtual 87188647Sjake * address. 87288647Sjake */ 873113238Sjakevm_paddr_t 87488647Sjakepmap_kextract(vm_offset_t va) 87588647Sjake{ 87688647Sjake struct tte *tp; 87788647Sjake 878108245Sjake if (va >= VM_MIN_DIRECT_ADDRESS) 879108245Sjake return (TLB_DIRECT_TO_PHYS(va)); 88088647Sjake tp = tsb_kvtotte(va); 88197027Sjake if ((tp->tte_data & TD_V) == 0) 88288647Sjake return (0); 88397027Sjake return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp))); 88488647Sjake} 88588647Sjake 88688647Sjakeint 88788647Sjakepmap_cache_enter(vm_page_t m, vm_offset_t va) 88888647Sjake{ 88988647Sjake struct tte *tp; 890100771Sjake int color; 89188647Sjake 892236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 893112697Sjake KASSERT((m->flags & PG_FICTITIOUS) == 0, 894112697Sjake ("pmap_cache_enter: fake page")); 89591165Sjake PMAP_STATS_INC(pmap_ncache_enter); 896101121Sjake 897211049Smarius if (dcache_color_ignore != 0) 898211049Smarius return (1); 899211049Smarius 900101121Sjake /* 901101121Sjake * Find the color for this virtual address and note the added mapping. 902101121Sjake */ 903100771Sjake color = DCACHE_COLOR(va); 904100771Sjake m->md.colors[color]++; 905101121Sjake 906101121Sjake /* 907101121Sjake * If all existing mappings have the same color, the mapping is 908101121Sjake * cacheable. 909101121Sjake */ 910100771Sjake if (m->md.color == color) { 911101121Sjake KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0, 912101121Sjake ("pmap_cache_enter: cacheable, mappings of other color")); 913108700Sjake if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 914108700Sjake PMAP_STATS_INC(pmap_ncache_enter_c); 915108700Sjake else 916108700Sjake PMAP_STATS_INC(pmap_ncache_enter_oc); 917100771Sjake return (1); 918100771Sjake } 919101121Sjake 920101121Sjake /* 921101121Sjake * If there are no mappings of the other color, and the page still has 922101121Sjake * the wrong color, this must be a new mapping. Change the color to 923101121Sjake * match the new mapping, which is cacheable. We must flush the page 924101121Sjake * from the cache now. 925101121Sjake */ 926101121Sjake if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) { 927101121Sjake KASSERT(m->md.colors[color] == 1, 928101121Sjake ("pmap_cache_enter: changing color, not new mapping")); 929100830Sjake dcache_page_inval(VM_PAGE_TO_PHYS(m)); 930100771Sjake m->md.color = color; 931108700Sjake if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 932108700Sjake PMAP_STATS_INC(pmap_ncache_enter_cc); 933108700Sjake else 934108700Sjake PMAP_STATS_INC(pmap_ncache_enter_coc); 93588647Sjake return (1); 93688647Sjake } 937101121Sjake 938101121Sjake /* 939101121Sjake * If the mapping is already non-cacheable, just return. 940176994Smarius */ 941108700Sjake if (m->md.color == -1) { 942108700Sjake PMAP_STATS_INC(pmap_ncache_enter_nc); 94388647Sjake return (0); 944108700Sjake } 945101121Sjake 946108700Sjake PMAP_STATS_INC(pmap_ncache_enter_cnc); 947108700Sjake 948101121Sjake /* 949101121Sjake * Mark all mappings as uncacheable, flush any lines with the other 950101121Sjake * color out of the dcache, and set the color to none (-1). 951101121Sjake */ 952108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 953108166Sjake atomic_clear_long(&tp->tte_data, TD_CV); 954100718Sjake tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 95588647Sjake } 95697446Sjake dcache_page_inval(VM_PAGE_TO_PHYS(m)); 957100771Sjake m->md.color = -1; 95888647Sjake return (0); 95988647Sjake} 96088647Sjake 961223800Smariusstatic void 96288647Sjakepmap_cache_remove(vm_page_t m, vm_offset_t va) 96388647Sjake{ 96497447Sjake struct tte *tp; 965100771Sjake int color; 96688647Sjake 967236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 96888647Sjake CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va, 96988647Sjake m->md.colors[DCACHE_COLOR(va)]); 970112697Sjake KASSERT((m->flags & PG_FICTITIOUS) == 0, 971112697Sjake ("pmap_cache_remove: fake page")); 972101121Sjake PMAP_STATS_INC(pmap_ncache_remove); 973101121Sjake 974211049Smarius if (dcache_color_ignore != 0) 975211049Smarius return; 976211049Smarius 977211568Smarius KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0, 978211568Smarius ("pmap_cache_remove: no mappings %d <= 0", 979211568Smarius m->md.colors[DCACHE_COLOR(va)])); 980211568Smarius 981101121Sjake /* 982101121Sjake * Find the color for this virtual address and note the removal of 983101121Sjake * the mapping. 984101121Sjake */ 985100771Sjake color = DCACHE_COLOR(va); 986100771Sjake m->md.colors[color]--; 987101121Sjake 988101121Sjake /* 989101121Sjake * If the page is cacheable, just return and keep the same color, even 990101121Sjake * if there are no longer any mappings. 991101121Sjake */ 992101121Sjake if (m->md.color != -1) { 993108700Sjake if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 994108700Sjake PMAP_STATS_INC(pmap_ncache_remove_c); 995108700Sjake else 996108700Sjake PMAP_STATS_INC(pmap_ncache_remove_oc); 997100771Sjake return; 99897447Sjake } 999101121Sjake 1000101121Sjake KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0, 1001101121Sjake ("pmap_cache_remove: uncacheable, no mappings of other color")); 1002101121Sjake 1003101121Sjake /* 1004101121Sjake * If the page is not cacheable (color is -1), and the number of 1005101121Sjake * mappings for this color is not zero, just return. There are 1006101121Sjake * mappings of the other color still, so remain non-cacheable. 1007101121Sjake */ 1008101121Sjake if (m->md.colors[color] != 0) { 1009101121Sjake PMAP_STATS_INC(pmap_ncache_remove_nc); 101097447Sjake return; 1011101121Sjake } 1012101121Sjake 1013101121Sjake /* 1014101121Sjake * The number of mappings for this color is now zero. Recache the 1015101121Sjake * other colored mappings, and change the page color to the other 1016101121Sjake * color. There should be no lines in the data cache for this page, 1017101121Sjake * so flushing should not be needed. 1018101121Sjake */ 1019108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 1020108166Sjake atomic_set_long(&tp->tte_data, TD_CV); 1021100718Sjake tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 102297447Sjake } 1023101121Sjake m->md.color = DCACHE_OTHER_COLOR(color); 1024108700Sjake 1025108700Sjake if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 1026108700Sjake PMAP_STATS_INC(pmap_ncache_remove_cc); 1027108700Sjake else 1028108700Sjake PMAP_STATS_INC(pmap_ncache_remove_coc); 102988647Sjake} 103088647Sjake 103188647Sjake/* 103280709Sjake * Map a wired page into kernel virtual address space. 103380709Sjake */ 103480708Sjakevoid 1035113238Sjakepmap_kenter(vm_offset_t va, vm_page_t m) 103680708Sjake{ 103797447Sjake vm_offset_t ova; 103888647Sjake struct tte *tp; 103997447Sjake vm_page_t om; 104097447Sjake u_long data; 104180709Sjake 1042236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 1043108700Sjake PMAP_STATS_INC(pmap_nkenter); 104488647Sjake tp = tsb_kvtotte(va); 104588647Sjake CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx", 1046113238Sjake va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data); 1047170249Salc if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) { 1048210334Sattilio CTR5(KTR_SPARE2, 1049211049Smarius "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx", 1050113238Sjake va, VM_PAGE_TO_PHYS(m), m->object, 1051112879Sjake m->object ? m->object->type : -1, 1052112879Sjake m->pindex); 1053112879Sjake PMAP_STATS_INC(pmap_nkenter_oc); 1054112879Sjake } 105597447Sjake if ((tp->tte_data & TD_V) != 0) { 1056113238Sjake om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 105797447Sjake ova = TTE_GET_VA(tp); 1058113238Sjake if (m == om && va == ova) { 1059112879Sjake PMAP_STATS_INC(pmap_nkenter_stupid); 1060112879Sjake return; 1061112879Sjake } 1062108166Sjake TAILQ_REMOVE(&om->md.tte_list, tp, tte_link); 106397447Sjake pmap_cache_remove(om, ova); 106497447Sjake if (va != ova) 1065100718Sjake tlb_page_demap(kernel_pmap, ova); 106697447Sjake } 1067113238Sjake data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP | 1068113238Sjake TD_P | TD_W; 106997447Sjake if (pmap_cache_enter(m, va) != 0) 107097447Sjake data |= TD_CV; 1071102040Sjake tp->tte_vpn = TV_VPN(va, TS_8K); 107297447Sjake tp->tte_data = data; 1073108166Sjake TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); 107480708Sjake} 107580708Sjake 107680709Sjake/* 1077181701Smarius * Map a wired page into kernel virtual address space. This additionally 1078220939Smarius * takes a flag argument which is or'ed to the TTE data. This is used by 1079181701Smarius * sparc64_bus_mem_map(). 108088826Stmm * NOTE: if the mapping is non-cacheable, it's the caller's responsibility 108188826Stmm * to flush entries that might still be in the cache, if applicable. 108284844Stmm */ 108384844Stmmvoid 1084113238Sjakepmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags) 108584844Stmm{ 108688647Sjake struct tte *tp; 108784844Stmm 108888647Sjake tp = tsb_kvtotte(va); 108988647Sjake CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx", 109088647Sjake va, pa, tp, tp->tte_data); 1091102040Sjake tp->tte_vpn = TV_VPN(va, TS_8K); 109295232Sjake tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags; 109384844Stmm} 109484844Stmm 109584844Stmm/* 109680709Sjake * Remove a wired page from kernel virtual address space. 109780709Sjake */ 109880708Sjakevoid 109980709Sjakepmap_kremove(vm_offset_t va) 110080708Sjake{ 110188647Sjake struct tte *tp; 110297447Sjake vm_page_t m; 110381181Sjake 1104236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 1105108700Sjake PMAP_STATS_INC(pmap_nkremove); 110688647Sjake tp = tsb_kvtotte(va); 110788647Sjake CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, 110888647Sjake tp->tte_data); 1109113165Sjake if ((tp->tte_data & TD_V) == 0) 1110113165Sjake return; 111197447Sjake m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 1112108166Sjake TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 111397447Sjake pmap_cache_remove(m, va); 111497447Sjake TTE_ZERO(tp); 111580708Sjake} 111680708Sjake 111780709Sjake/* 111898813Sjake * Inverse of pmap_kenter_flags, used by bus_space_unmap(). 111998813Sjake */ 112098813Sjakevoid 112198813Sjakepmap_kremove_flags(vm_offset_t va) 112298813Sjake{ 112398813Sjake struct tte *tp; 112498813Sjake 112598813Sjake tp = tsb_kvtotte(va); 1126195149Smarius CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp, 112798813Sjake tp->tte_data); 112898813Sjake TTE_ZERO(tp); 112998813Sjake} 113098813Sjake 113198813Sjake/* 113288647Sjake * Map a range of physical addresses into kernel virtual address space. 113388647Sjake * 113488647Sjake * The value passed in *virt is a suggested virtual address for the mapping. 113588647Sjake * Architectures which can support a direct-mapped physical to virtual region 113688647Sjake * can return the appropriate address within that region, leaving '*virt' 1137108245Sjake * unchanged. 113888647Sjake */ 113988647Sjakevm_offset_t 1140113238Sjakepmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 114188647Sjake{ 114288647Sjake 1143108301Sjake return (TLB_PHYS_TO_DIRECT(start)); 114488647Sjake} 114588647Sjake 114688647Sjake/* 114780709Sjake * Map a list of wired pages into kernel virtual address space. This is 114880709Sjake * intended for temporary mappings which do not need page modification or 114980709Sjake * references recorded. Existing mappings in the region are overwritten. 115080709Sjake */ 115180708Sjakevoid 115291177Sjakepmap_qenter(vm_offset_t sva, vm_page_t *m, int count) 115380708Sjake{ 115491177Sjake vm_offset_t va; 115580709Sjake 1156108700Sjake PMAP_STATS_INC(pmap_nqenter); 115791177Sjake va = sva; 1158236214Salc rw_wlock(&tte_list_global_lock); 1159108157Sjake while (count-- > 0) { 1160113238Sjake pmap_kenter(va, *m); 1161108157Sjake va += PAGE_SIZE; 1162108157Sjake m++; 1163108157Sjake } 1164236214Salc rw_wunlock(&tte_list_global_lock); 1165108157Sjake tlb_range_demap(kernel_pmap, sva, va); 116680708Sjake} 116780708Sjake 116880709Sjake/* 116980709Sjake * Remove page mappings from kernel virtual address space. Intended for 117080709Sjake * temporary mappings entered by pmap_qenter. 117180709Sjake */ 117280708Sjakevoid 117391177Sjakepmap_qremove(vm_offset_t sva, int count) 117480708Sjake{ 117591177Sjake vm_offset_t va; 117680709Sjake 1177108700Sjake PMAP_STATS_INC(pmap_nqremove); 117891177Sjake va = sva; 1179236214Salc rw_wlock(&tte_list_global_lock); 1180108157Sjake while (count-- > 0) { 118180709Sjake pmap_kremove(va); 1182108157Sjake va += PAGE_SIZE; 1183108157Sjake } 1184236214Salc rw_wunlock(&tte_list_global_lock); 1185108157Sjake tlb_range_demap(kernel_pmap, sva, va); 118680708Sjake} 118780708Sjake 118880709Sjake/* 118988647Sjake * Initialize the pmap associated with process 0. 119088647Sjake */ 119184183Sjakevoid 119288647Sjakepmap_pinit0(pmap_t pm) 119380708Sjake{ 119491613Sjake int i; 119580709Sjake 1196133451Salc PMAP_LOCK_INIT(pm); 119791613Sjake for (i = 0; i < MAXCPU; i++) 1198214528Smarius pm->pm_context[i] = TLB_CTX_KERNEL; 1199222813Sattilio CPU_ZERO(&pm->pm_active); 120088647Sjake pm->pm_tsb = NULL; 120188647Sjake pm->pm_tsb_obj = NULL; 120288647Sjake bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 120380708Sjake} 120480708Sjake 120588647Sjake/* 1206162544Salc * Initialize a preallocated and zeroed pmap structure, such as one in a 120788647Sjake * vmspace structure. 120888647Sjake */ 1209173361Skibint 121088647Sjakepmap_pinit(pmap_t pm) 121180708Sjake{ 121291274Sjake vm_page_t ma[TSB_PAGES]; 121388647Sjake vm_page_t m; 121491274Sjake int i; 121580709Sjake 121688647Sjake /* 1217181701Smarius * Allocate KVA space for the TSB. 121888647Sjake */ 121988647Sjake if (pm->pm_tsb == NULL) { 1220254025Sjeff pm->pm_tsb = (struct tte *)kva_alloc(TSB_BSIZE); 1221270441Skib if (pm->pm_tsb == NULL) 1222173361Skib return (0); 1223173361Skib } 122488647Sjake 122588647Sjake /* 122688647Sjake * Allocate an object for it. 122788647Sjake */ 122888647Sjake if (pm->pm_tsb_obj == NULL) 1229207649Salc pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES); 123088647Sjake 1231214528Smarius for (i = 0; i < MAXCPU; i++) 1232214528Smarius pm->pm_context[i] = -1; 1233222813Sattilio CPU_ZERO(&pm->pm_active); 1234214528Smarius 1235248084Sattilio VM_OBJECT_WLOCK(pm->pm_tsb_obj); 123691274Sjake for (i = 0; i < TSB_PAGES; i++) { 1237138697Salc m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY | 1238254649Skib VM_ALLOC_WIRED | VM_ALLOC_ZERO); 123991274Sjake m->valid = VM_PAGE_BITS_ALL; 1240108166Sjake m->md.pmap = pm; 124191274Sjake ma[i] = m; 124291274Sjake } 1243248084Sattilio VM_OBJECT_WUNLOCK(pm->pm_tsb_obj); 124491274Sjake pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES); 124588647Sjake 124688647Sjake bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1247173361Skib return (1); 124888647Sjake} 124988647Sjake 125088647Sjake/* 125188647Sjake * Release any resources held by the given physical map. 125288647Sjake * Called when a pmap initialized by pmap_pinit is being released. 125388647Sjake * Should only be called if the map contains no valid mappings. 125488647Sjake */ 125588647Sjakevoid 125688647Sjakepmap_release(pmap_t pm) 125788647Sjake{ 125888647Sjake vm_object_t obj; 125988647Sjake vm_page_t m; 1260226054Smarius#ifdef SMP 1261129749Stmm struct pcpu *pc; 1262226054Smarius#endif 126388647Sjake 126491288Sjake CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", 1265181701Smarius pm->pm_context[curcpu], pm->pm_tsb); 126688647Sjake KASSERT(pmap_resident_count(pm) == 0, 126788647Sjake ("pmap_release: resident pages %ld != 0", 126888647Sjake pmap_resident_count(pm))); 1269129749Stmm 1270129749Stmm /* 1271129749Stmm * After the pmap was freed, it might be reallocated to a new process. 1272129749Stmm * When switching, this might lead us to wrongly assume that we need 1273129749Stmm * not switch contexts because old and new pmap pointer are equal. 1274129749Stmm * Therefore, make sure that this pmap is not referenced by any PCPU 1275181701Smarius * pointer any more. This could happen in two cases: 1276129749Stmm * - A process that referenced the pmap is currently exiting on a CPU. 1277129749Stmm * However, it is guaranteed to not switch in any more after setting 1278129749Stmm * its state to PRS_ZOMBIE. 1279129749Stmm * - A process that referenced this pmap ran on a CPU, but we switched 1280129749Stmm * to a kernel thread, leaving the pmap pointer unchanged. 1281129749Stmm */ 1282226054Smarius#ifdef SMP 1283226054Smarius sched_pin(); 1284222531Snwhitehorn STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) 1285226054Smarius atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap, 1286226054Smarius (uintptr_t)pm, (uintptr_t)NULL); 1287226054Smarius sched_unpin(); 1288226054Smarius#else 1289226054Smarius critical_enter(); 1290226054Smarius if (PCPU_GET(pmap) == pm) 1291226054Smarius PCPU_SET(pmap, NULL); 1292226054Smarius critical_exit(); 1293226054Smarius#endif 1294129749Stmm 1295223795Smarius pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES); 1296120534Salc obj = pm->pm_tsb_obj; 1297248084Sattilio VM_OBJECT_WLOCK(obj); 1298120534Salc KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1")); 129999025Sjake while (!TAILQ_EMPTY(&obj->memq)) { 130099025Sjake m = TAILQ_FIRST(&obj->memq); 1301108166Sjake m->md.pmap = NULL; 130291274Sjake m->wire_count--; 1303170170Sattilio atomic_subtract_int(&cnt.v_wire_count, 1); 130491274Sjake vm_page_free_zero(m); 130591274Sjake } 1306248084Sattilio VM_OBJECT_WUNLOCK(obj); 130780708Sjake} 130880708Sjake 130988647Sjake/* 131088647Sjake * Grow the number of kernel page table entries. Unneeded. 131188647Sjake */ 131288647Sjakevoid 131388647Sjakepmap_growkernel(vm_offset_t addr) 131482903Sjake{ 1315101870Sjake 1316101870Sjake panic("pmap_growkernel: can't grow kernel"); 131788647Sjake} 131882903Sjake 131997447Sjakeint 132091335Sjakepmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, 1321203839Smarius vm_offset_t va) 132288826Stmm{ 132388826Stmm vm_page_t m; 1324108166Sjake u_long data; 132588826Stmm 1326236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 1327108166Sjake data = atomic_readandclear_long(&tp->tte_data); 1328112697Sjake if ((data & TD_FAKE) == 0) { 1329112697Sjake m = PHYS_TO_VM_PAGE(TD_PA(data)); 1330112697Sjake TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 1331112697Sjake if ((data & TD_WIRED) != 0) 1332112697Sjake pm->pm_stats.wired_count--; 1333112697Sjake if ((data & TD_PV) != 0) { 1334159031Salc if ((data & TD_W) != 0) 1335112697Sjake vm_page_dirty(m); 1336112697Sjake if ((data & TD_REF) != 0) 1337225418Skib vm_page_aflag_set(m, PGA_REFERENCED); 1338112697Sjake if (TAILQ_EMPTY(&m->md.tte_list)) 1339225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1340112697Sjake pm->pm_stats.resident_count--; 1341112697Sjake } 1342112697Sjake pmap_cache_remove(m, va); 134388826Stmm } 134497446Sjake TTE_ZERO(tp); 134588826Stmm if (PMAP_REMOVE_DONE(pm)) 134688826Stmm return (0); 134788826Stmm return (1); 134888826Stmm} 134988826Stmm 135088647Sjake/* 135188647Sjake * Remove the given range of addresses from the specified map. 135288647Sjake */ 135388647Sjakevoid 135488647Sjakepmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) 135582903Sjake{ 135688647Sjake struct tte *tp; 135791168Sjake vm_offset_t va; 135882903Sjake 135988647Sjake CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", 1360181701Smarius pm->pm_context[curcpu], start, end); 136188826Stmm if (PMAP_REMOVE_DONE(pm)) 136288826Stmm return; 1363236214Salc rw_wlock(&tte_list_global_lock); 1364133451Salc PMAP_LOCK(pm); 136591177Sjake if (end - start > PMAP_TSB_THRESH) { 136691168Sjake tsb_foreach(pm, NULL, start, end, pmap_remove_tte); 136791782Sjake tlb_context_demap(pm); 136891177Sjake } else { 1369203839Smarius for (va = start; va < end; va += PAGE_SIZE) 1370203839Smarius if ((tp = tsb_tte_lookup(pm, va)) != NULL && 1371203839Smarius !pmap_remove_tte(pm, NULL, tp, va)) 1372203839Smarius break; 137391782Sjake tlb_range_demap(pm, start, end - 1); 137488647Sjake } 1375133451Salc PMAP_UNLOCK(pm); 1376236214Salc rw_wunlock(&tte_list_global_lock); 137782903Sjake} 137882903Sjake 137997447Sjakevoid 138097447Sjakepmap_remove_all(vm_page_t m) 138197447Sjake{ 138297447Sjake struct pmap *pm; 138397447Sjake struct tte *tpn; 138497447Sjake struct tte *tp; 138597447Sjake vm_offset_t va; 138697447Sjake 1387224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1388223798Smarius ("pmap_remove_all: page %p is not managed", m)); 1389236214Salc rw_wlock(&tte_list_global_lock); 1390108166Sjake for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) { 1391108166Sjake tpn = TAILQ_NEXT(tp, tte_link); 139297447Sjake if ((tp->tte_data & TD_PV) == 0) 139397447Sjake continue; 139497447Sjake pm = TTE_GET_PMAP(tp); 139597447Sjake va = TTE_GET_VA(tp); 1396133663Salc PMAP_LOCK(pm); 139797447Sjake if ((tp->tte_data & TD_WIRED) != 0) 139897447Sjake pm->pm_stats.wired_count--; 139997447Sjake if ((tp->tte_data & TD_REF) != 0) 1400225418Skib vm_page_aflag_set(m, PGA_REFERENCED); 1401159031Salc if ((tp->tte_data & TD_W) != 0) 140297447Sjake vm_page_dirty(m); 140397447Sjake tp->tte_data &= ~TD_V; 1404100718Sjake tlb_page_demap(pm, va); 1405108166Sjake TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 140697447Sjake pm->pm_stats.resident_count--; 140797447Sjake pmap_cache_remove(m, va); 140897447Sjake TTE_ZERO(tp); 1409133663Salc PMAP_UNLOCK(pm); 141097447Sjake } 1411225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1412236214Salc rw_wunlock(&tte_list_global_lock); 141397447Sjake} 141497447Sjake 1415223800Smariusstatic int 141691335Sjakepmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, 1417203839Smarius vm_offset_t va) 141888826Stmm{ 1419108166Sjake u_long data; 142088826Stmm vm_page_t m; 142188826Stmm 1422225841Skib PMAP_LOCK_ASSERT(pm, MA_OWNED); 1423207373Salc data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W); 1424207373Salc if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) { 1425108166Sjake m = PHYS_TO_VM_PAGE(TD_PA(data)); 1426207373Salc vm_page_dirty(m); 142788826Stmm } 1428118217Stmm return (1); 142988826Stmm} 143088826Stmm 143188647Sjake/* 143288647Sjake * Set the physical protection on the specified range of this map as requested. 143388647Sjake */ 143482903Sjakevoid 143588647Sjakepmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 143680708Sjake{ 143791168Sjake vm_offset_t va; 143888647Sjake struct tte *tp; 143984183Sjake 144088647Sjake CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", 1441181701Smarius pm->pm_context[curcpu], sva, eva, prot); 144288647Sjake 144388647Sjake if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 144488647Sjake pmap_remove(pm, sva, eva); 144588647Sjake return; 144684183Sjake } 144788647Sjake 144888647Sjake if (prot & VM_PROT_WRITE) 144988647Sjake return; 145088647Sjake 1451133451Salc PMAP_LOCK(pm); 145291177Sjake if (eva - sva > PMAP_TSB_THRESH) { 145391168Sjake tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte); 145491782Sjake tlb_context_demap(pm); 145591177Sjake } else { 1456203839Smarius for (va = sva; va < eva; va += PAGE_SIZE) 145791168Sjake if ((tp = tsb_tte_lookup(pm, va)) != NULL) 145891168Sjake pmap_protect_tte(pm, NULL, tp, va); 145991782Sjake tlb_range_demap(pm, sva, eva - 1); 146088647Sjake } 1461133451Salc PMAP_UNLOCK(pm); 146280708Sjake} 146380708Sjake 146488647Sjake/* 146588647Sjake * Map the given physical page at the specified virtual address in the 146688647Sjake * target pmap with the protection requested. If specified the page 146788647Sjake * will be wired down. 146888647Sjake */ 1469270439Skibint 1470270439Skibpmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1471270439Skib u_int flags, int8_t psind) 147280708Sjake{ 1473270439Skib int rv; 1474159303Salc 1475236214Salc rw_wlock(&tte_list_global_lock); 1476159303Salc PMAP_LOCK(pm); 1477270439Skib rv = pmap_enter_locked(pm, va, m, prot, flags, psind); 1478236214Salc rw_wunlock(&tte_list_global_lock); 1479159303Salc PMAP_UNLOCK(pm); 1480270439Skib return (rv); 1481159303Salc} 1482159303Salc 1483159303Salc/* 1484159303Salc * Map the given physical page at the specified virtual address in the 1485159303Salc * target pmap with the protection requested. If specified the page 1486159303Salc * will be wired down. 1487159303Salc * 1488159303Salc * The page queues and pmap must be locked. 1489159303Salc */ 1490270439Skibstatic int 1491159303Salcpmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1492270439Skib u_int flags, int8_t psind __unused) 1493159303Salc{ 149488647Sjake struct tte *tp; 1495113238Sjake vm_paddr_t pa; 1496224746Skib vm_page_t real; 149797030Sjake u_long data; 1498270439Skib boolean_t wired; 149981895Sjake 1500236214Salc rw_assert(&tte_list_global_lock, RA_WLOCKED); 1501159303Salc PMAP_LOCK_ASSERT(pm, MA_OWNED); 1502254138Sattilio if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1503250747Salc VM_OBJECT_ASSERT_LOCKED(m->object); 1504108700Sjake PMAP_STATS_INC(pmap_nenter); 150588647Sjake pa = VM_PAGE_TO_PHYS(m); 1506270439Skib wired = (flags & PMAP_ENTER_WIRED) != 0; 1507112697Sjake 1508112697Sjake /* 1509112697Sjake * If this is a fake page from the device_pager, but it covers actual 1510112697Sjake * physical memory, convert to the real backing page. 1511112697Sjake */ 1512112697Sjake if ((m->flags & PG_FICTITIOUS) != 0) { 1513224746Skib real = vm_phys_paddr_to_vm_page(pa); 1514224746Skib if (real != NULL) 1515224746Skib m = real; 1516112697Sjake } 1517112697Sjake 151888647Sjake CTR6(KTR_PMAP, 1519195149Smarius "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d", 1520181701Smarius pm->pm_context[curcpu], m, va, pa, prot, wired); 152188647Sjake 152288647Sjake /* 152388647Sjake * If there is an existing mapping, and the physical address has not 152488647Sjake * changed, must be protection or wiring change. 152588647Sjake */ 152697030Sjake if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) { 1527195149Smarius CTR0(KTR_PMAP, "pmap_enter_locked: update"); 1528108700Sjake PMAP_STATS_INC(pmap_nenter_update); 152988647Sjake 153097030Sjake /* 153197030Sjake * Wiring change, just update stats. 153297030Sjake */ 153397030Sjake if (wired) { 153497030Sjake if ((tp->tte_data & TD_WIRED) == 0) { 153597030Sjake tp->tte_data |= TD_WIRED; 153697030Sjake pm->pm_stats.wired_count++; 153797030Sjake } 153897030Sjake } else { 153997030Sjake if ((tp->tte_data & TD_WIRED) != 0) { 154097030Sjake tp->tte_data &= ~TD_WIRED; 154197030Sjake pm->pm_stats.wired_count--; 154297030Sjake } 154397030Sjake } 154488647Sjake 154597030Sjake /* 154697030Sjake * Save the old bits and clear the ones we're interested in. 154797030Sjake */ 154897030Sjake data = tp->tte_data; 154997030Sjake tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W); 155097030Sjake 155197030Sjake /* 155297030Sjake * If we're turning off write permissions, sense modify status. 155397030Sjake */ 155497030Sjake if ((prot & VM_PROT_WRITE) != 0) { 155597030Sjake tp->tte_data |= TD_SW; 1556203839Smarius if (wired) 155797030Sjake tp->tte_data |= TD_W; 1558224746Skib if ((m->oflags & VPO_UNMANAGED) == 0) 1559225418Skib vm_page_aflag_set(m, PGA_WRITEABLE); 1560203839Smarius } else if ((data & TD_W) != 0) 156197030Sjake vm_page_dirty(m); 156288647Sjake 156397030Sjake /* 156497030Sjake * If we're turning on execute permissions, flush the icache. 156597030Sjake */ 156697030Sjake if ((prot & VM_PROT_EXECUTE) != 0) { 1567203839Smarius if ((data & TD_EXEC) == 0) 156897030Sjake icache_page_inval(pa); 156997030Sjake tp->tte_data |= TD_EXEC; 157097030Sjake } 157197030Sjake 157297030Sjake /* 157397030Sjake * Delete the old mapping. 157497030Sjake */ 1575100718Sjake tlb_page_demap(pm, TTE_GET_VA(tp)); 157697030Sjake } else { 157797030Sjake /* 157897030Sjake * If there is an existing mapping, but its for a different 1579208846Salc * physical address, delete the old mapping. 158097030Sjake */ 158197030Sjake if (tp != NULL) { 1582195149Smarius CTR0(KTR_PMAP, "pmap_enter_locked: replace"); 1583108700Sjake PMAP_STATS_INC(pmap_nenter_replace); 158497030Sjake pmap_remove_tte(pm, NULL, tp, va); 1585100718Sjake tlb_page_demap(pm, va); 158697030Sjake } else { 1587195149Smarius CTR0(KTR_PMAP, "pmap_enter_locked: new"); 1588108700Sjake PMAP_STATS_INC(pmap_nenter_new); 158997030Sjake } 159088647Sjake 159197030Sjake /* 159297030Sjake * Now set up the data and install the new mapping. 159397030Sjake */ 1594112697Sjake data = TD_V | TD_8K | TD_PA(pa); 159597030Sjake if (pm == kernel_pmap) 159697030Sjake data |= TD_P; 1597164229Salc if ((prot & VM_PROT_WRITE) != 0) { 159897030Sjake data |= TD_SW; 1599224746Skib if ((m->oflags & VPO_UNMANAGED) == 0) 1600225418Skib vm_page_aflag_set(m, PGA_WRITEABLE); 1601164229Salc } 160297030Sjake if (prot & VM_PROT_EXECUTE) { 160397030Sjake data |= TD_EXEC; 160497030Sjake icache_page_inval(pa); 160597030Sjake } 160688647Sjake 160797030Sjake /* 160897030Sjake * If its wired update stats. We also don't need reference or 160997030Sjake * modify tracking for wired mappings, so set the bits now. 161097030Sjake */ 161197030Sjake if (wired) { 161297030Sjake pm->pm_stats.wired_count++; 161397030Sjake data |= TD_REF | TD_WIRED; 161497030Sjake if ((prot & VM_PROT_WRITE) != 0) 161597030Sjake data |= TD_W; 161688647Sjake } 161788647Sjake 1618102040Sjake tsb_tte_enter(pm, m, va, TS_8K, data); 161988647Sjake } 1620270439Skib 1621270439Skib return (KERN_SUCCESS); 1622159303Salc} 1623159303Salc 1624159303Salc/* 1625159303Salc * Maps a sequence of resident pages belonging to the same object. 1626159303Salc * The sequence begins with the given page m_start. This page is 1627159303Salc * mapped at the given virtual address start. Each subsequent page is 1628159303Salc * mapped at a virtual address that is offset from start by the same 1629159303Salc * amount as the page is offset from m_start within the object. The 1630159303Salc * last page in the sequence is the page with the largest offset from 1631159303Salc * m_start that can be mapped at a virtual address less than the given 1632159303Salc * virtual address end. Not every virtual page between start and end 1633159303Salc * is mapped; only those for which a resident page exists with the 1634159303Salc * corresponding offset from m_start are mapped. 1635159303Salc */ 1636159303Salcvoid 1637159303Salcpmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, 1638159303Salc vm_page_t m_start, vm_prot_t prot) 1639159303Salc{ 1640159303Salc vm_page_t m; 1641159303Salc vm_pindex_t diff, psize; 1642159303Salc 1643250884Sattilio VM_OBJECT_ASSERT_LOCKED(m_start->object); 1644250884Sattilio 1645159303Salc psize = atop(end - start); 1646159303Salc m = m_start; 1647236214Salc rw_wlock(&tte_list_global_lock); 1648159303Salc PMAP_LOCK(pm); 1649159303Salc while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1650159303Salc pmap_enter_locked(pm, start + ptoa(diff), m, prot & 1651270439Skib (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0); 1652159303Salc m = TAILQ_NEXT(m, listq); 1653159303Salc } 1654236214Salc rw_wunlock(&tte_list_global_lock); 1655133451Salc PMAP_UNLOCK(pm); 165680708Sjake} 165780708Sjake 1658159627Supsvoid 1659159627Supspmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) 1660117045Salc{ 1661117045Salc 1662236214Salc rw_wlock(&tte_list_global_lock); 1663159303Salc PMAP_LOCK(pm); 1664159303Salc pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1665270439Skib 0, 0); 1666236214Salc rw_wunlock(&tte_list_global_lock); 1667159303Salc PMAP_UNLOCK(pm); 1668117045Salc} 1669117045Salc 167080709Sjakevoid 167188647Sjakepmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, 1672203839Smarius vm_pindex_t pindex, vm_size_t size) 167380708Sjake{ 1674117206Salc 1675248084Sattilio VM_OBJECT_ASSERT_WLOCKED(object); 1676195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 1677117206Salc ("pmap_object_init_pt: non-device object")); 167880708Sjake} 167980708Sjake 1680270920Skibstatic int 1681270920Skibpmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, vm_offset_t va) 1682270920Skib{ 1683270920Skib 1684270920Skib PMAP_LOCK_ASSERT(pm, MA_OWNED); 1685270920Skib if ((tp->tte_data & TD_WIRED) == 0) 1686270920Skib panic("pmap_unwire_tte: tp %p is missing TD_WIRED", tp); 1687270920Skib atomic_clear_long(&tp->tte_data, TD_WIRED); 1688270920Skib pm->pm_stats.wired_count--; 1689270920Skib return (1); 1690270920Skib} 1691270920Skib 169288647Sjake/* 1693270920Skib * Clear the wired attribute from the mappings for the specified range of 1694270920Skib * addresses in the given pmap. Every valid mapping within that range must 1695270920Skib * have the wired attribute set. In contrast, invalid mappings cannot have 1696270920Skib * the wired attribute set, so they are ignored. 1697270920Skib * 1698270920Skib * The wired attribute of the translation table entry is not a hardware 1699270920Skib * feature, so there is no need to invalidate any TLB entries. 170088647Sjake */ 170180708Sjakevoid 1702270920Skibpmap_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 170388647Sjake{ 1704270920Skib vm_offset_t va; 170588647Sjake struct tte *tp; 170688647Sjake 1707133451Salc PMAP_LOCK(pm); 1708270920Skib if (eva - sva > PMAP_TSB_THRESH) 1709270920Skib tsb_foreach(pm, NULL, sva, eva, pmap_unwire_tte); 1710270920Skib else { 1711270920Skib for (va = sva; va < eva; va += PAGE_SIZE) 1712270920Skib if ((tp = tsb_tte_lookup(pm, va)) != NULL) 1713270920Skib pmap_unwire_tte(pm, NULL, tp, va); 171488647Sjake } 1715133451Salc PMAP_UNLOCK(pm); 171688647Sjake} 171788647Sjake 171891168Sjakestatic int 1719203839Smariuspmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, 1720203839Smarius vm_offset_t va) 172191168Sjake{ 172291168Sjake vm_page_t m; 172397030Sjake u_long data; 172491168Sjake 1725116543Sjake if ((tp->tte_data & TD_FAKE) != 0) 1726116543Sjake return (1); 172791168Sjake if (tsb_tte_lookup(dst_pmap, va) == NULL) { 172897030Sjake data = tp->tte_data & 172991224Sjake ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W); 173097027Sjake m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 1731102040Sjake tsb_tte_enter(dst_pmap, m, va, TS_8K, data); 173291168Sjake } 173391168Sjake return (1); 173491168Sjake} 173591168Sjake 173688647Sjakevoid 173780709Sjakepmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1738204152Smarius vm_size_t len, vm_offset_t src_addr) 173980708Sjake{ 174091168Sjake struct tte *tp; 174191168Sjake vm_offset_t va; 174291168Sjake 174391168Sjake if (dst_addr != src_addr) 174491168Sjake return; 1745236214Salc rw_wlock(&tte_list_global_lock); 1746141370Salc if (dst_pmap < src_pmap) { 1747141370Salc PMAP_LOCK(dst_pmap); 1748141370Salc PMAP_LOCK(src_pmap); 1749141370Salc } else { 1750141370Salc PMAP_LOCK(src_pmap); 1751141370Salc PMAP_LOCK(dst_pmap); 1752141370Salc } 175391168Sjake if (len > PMAP_TSB_THRESH) { 175491288Sjake tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, 175591288Sjake pmap_copy_tte); 175691782Sjake tlb_context_demap(dst_pmap); 175791168Sjake } else { 1758203839Smarius for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) 175991168Sjake if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL) 176091168Sjake pmap_copy_tte(src_pmap, dst_pmap, tp, va); 176191782Sjake tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1); 176291168Sjake } 1763236214Salc rw_wunlock(&tte_list_global_lock); 1764141370Salc PMAP_UNLOCK(src_pmap); 1765133451Salc PMAP_UNLOCK(dst_pmap); 176680708Sjake} 176780708Sjake 176880708Sjakevoid 176994777Speterpmap_zero_page(vm_page_t m) 177080708Sjake{ 1771113238Sjake struct tte *tp; 1772100771Sjake vm_offset_t va; 1773113238Sjake vm_paddr_t pa; 177482903Sjake 1775112697Sjake KASSERT((m->flags & PG_FICTITIOUS) == 0, 1776112697Sjake ("pmap_zero_page: fake page")); 1777108700Sjake PMAP_STATS_INC(pmap_nzero_page); 1778108245Sjake pa = VM_PAGE_TO_PHYS(m); 1779211049Smarius if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1780108700Sjake PMAP_STATS_INC(pmap_nzero_page_c); 1781108245Sjake va = TLB_PHYS_TO_DIRECT(pa); 1782113166Sjake cpu_block_zero((void *)va, PAGE_SIZE); 1783211049Smarius } else if (m->md.color == -1) { 1784211049Smarius PMAP_STATS_INC(pmap_nzero_page_nc); 1785211049Smarius aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); 1786108245Sjake } else { 1787108700Sjake PMAP_STATS_INC(pmap_nzero_page_oc); 1788142869Salc PMAP_LOCK(kernel_pmap); 1789108245Sjake va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); 1790108245Sjake tp = tsb_kvtotte(va); 1791108245Sjake tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1792108245Sjake tp->tte_vpn = TV_VPN(va, TS_8K); 1793113166Sjake cpu_block_zero((void *)va, PAGE_SIZE); 1794108245Sjake tlb_page_demap(kernel_pmap, va); 1795142869Salc PMAP_UNLOCK(kernel_pmap); 1796108245Sjake } 179788647Sjake} 179885241Sjake 179988647Sjakevoid 180094777Speterpmap_zero_page_area(vm_page_t m, int off, int size) 180188647Sjake{ 1802113238Sjake struct tte *tp; 1803100771Sjake vm_offset_t va; 1804113238Sjake vm_paddr_t pa; 180582903Sjake 1806112697Sjake KASSERT((m->flags & PG_FICTITIOUS) == 0, 1807112697Sjake ("pmap_zero_page_area: fake page")); 180888647Sjake KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size")); 1809108700Sjake PMAP_STATS_INC(pmap_nzero_page_area); 1810108245Sjake pa = VM_PAGE_TO_PHYS(m); 1811211049Smarius if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1812108700Sjake PMAP_STATS_INC(pmap_nzero_page_area_c); 1813108245Sjake va = TLB_PHYS_TO_DIRECT(pa); 1814108245Sjake bzero((void *)(va + off), size); 1815211049Smarius } else if (m->md.color == -1) { 1816211049Smarius PMAP_STATS_INC(pmap_nzero_page_area_nc); 1817211049Smarius aszero(ASI_PHYS_USE_EC, pa + off, size); 1818108245Sjake } else { 1819108700Sjake PMAP_STATS_INC(pmap_nzero_page_area_oc); 1820142869Salc PMAP_LOCK(kernel_pmap); 1821108245Sjake va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); 1822108245Sjake tp = tsb_kvtotte(va); 1823108245Sjake tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1824108245Sjake tp->tte_vpn = TV_VPN(va, TS_8K); 1825108245Sjake bzero((void *)(va + off), size); 1826108245Sjake tlb_page_demap(kernel_pmap, va); 1827142869Salc PMAP_UNLOCK(kernel_pmap); 1828108245Sjake } 182988647Sjake} 183082903Sjake 183199571Spetervoid 183299571Speterpmap_zero_page_idle(vm_page_t m) 183399571Speter{ 1834113238Sjake struct tte *tp; 1835100771Sjake vm_offset_t va; 1836113238Sjake vm_paddr_t pa; 183799571Speter 1838112697Sjake KASSERT((m->flags & PG_FICTITIOUS) == 0, 1839112697Sjake ("pmap_zero_page_idle: fake page")); 1840108700Sjake PMAP_STATS_INC(pmap_nzero_page_idle); 1841108245Sjake pa = VM_PAGE_TO_PHYS(m); 1842211049Smarius if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1843108700Sjake PMAP_STATS_INC(pmap_nzero_page_idle_c); 1844108245Sjake va = TLB_PHYS_TO_DIRECT(pa); 1845113166Sjake cpu_block_zero((void *)va, PAGE_SIZE); 1846211049Smarius } else if (m->md.color == -1) { 1847211049Smarius PMAP_STATS_INC(pmap_nzero_page_idle_nc); 1848211049Smarius aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); 1849108245Sjake } else { 1850108700Sjake PMAP_STATS_INC(pmap_nzero_page_idle_oc); 1851108245Sjake va = pmap_idle_map + (m->md.color * PAGE_SIZE); 1852108245Sjake tp = tsb_kvtotte(va); 1853108245Sjake tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1854108245Sjake tp->tte_vpn = TV_VPN(va, TS_8K); 1855113166Sjake cpu_block_zero((void *)va, PAGE_SIZE); 1856108245Sjake tlb_page_demap(kernel_pmap, va); 1857108245Sjake } 185899571Speter} 185999571Speter 186088647Sjakevoid 186194777Speterpmap_copy_page(vm_page_t msrc, vm_page_t mdst) 186288647Sjake{ 1863108245Sjake vm_offset_t vdst; 1864108245Sjake vm_offset_t vsrc; 1865113238Sjake vm_paddr_t pdst; 1866113238Sjake vm_paddr_t psrc; 1867108245Sjake struct tte *tp; 186882903Sjake 1869112697Sjake KASSERT((mdst->flags & PG_FICTITIOUS) == 0, 1870112697Sjake ("pmap_copy_page: fake dst page")); 1871112697Sjake KASSERT((msrc->flags & PG_FICTITIOUS) == 0, 1872112697Sjake ("pmap_copy_page: fake src page")); 1873108700Sjake PMAP_STATS_INC(pmap_ncopy_page); 1874108245Sjake pdst = VM_PAGE_TO_PHYS(mdst); 1875108245Sjake psrc = VM_PAGE_TO_PHYS(msrc); 1876211049Smarius if (dcache_color_ignore != 0 || 1877211049Smarius (msrc->md.color == DCACHE_COLOR(psrc) && 1878211049Smarius mdst->md.color == DCACHE_COLOR(pdst))) { 1879108700Sjake PMAP_STATS_INC(pmap_ncopy_page_c); 1880108245Sjake vdst = TLB_PHYS_TO_DIRECT(pdst); 1881108245Sjake vsrc = TLB_PHYS_TO_DIRECT(psrc); 1882113166Sjake cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE); 1883211049Smarius } else if (msrc->md.color == -1 && mdst->md.color == -1) { 1884211049Smarius PMAP_STATS_INC(pmap_ncopy_page_nc); 1885211049Smarius ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE); 1886108245Sjake } else if (msrc->md.color == -1) { 1887108245Sjake if (mdst->md.color == DCACHE_COLOR(pdst)) { 1888108700Sjake PMAP_STATS_INC(pmap_ncopy_page_dc); 1889108245Sjake vdst = TLB_PHYS_TO_DIRECT(pdst); 1890108245Sjake ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, 1891108245Sjake PAGE_SIZE); 1892108245Sjake } else { 1893108700Sjake PMAP_STATS_INC(pmap_ncopy_page_doc); 1894142869Salc PMAP_LOCK(kernel_pmap); 1895108245Sjake vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE); 1896108245Sjake tp = tsb_kvtotte(vdst); 1897108245Sjake tp->tte_data = 1898108245Sjake TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W; 1899108245Sjake tp->tte_vpn = TV_VPN(vdst, TS_8K); 1900108245Sjake ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, 1901108245Sjake PAGE_SIZE); 1902108245Sjake tlb_page_demap(kernel_pmap, vdst); 1903142869Salc PMAP_UNLOCK(kernel_pmap); 1904108245Sjake } 1905108245Sjake } else if (mdst->md.color == -1) { 1906108245Sjake if (msrc->md.color == DCACHE_COLOR(psrc)) { 1907108700Sjake PMAP_STATS_INC(pmap_ncopy_page_sc); 1908108245Sjake vsrc = TLB_PHYS_TO_DIRECT(psrc); 1909108245Sjake ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, 1910108245Sjake PAGE_SIZE); 1911108245Sjake } else { 1912108700Sjake PMAP_STATS_INC(pmap_ncopy_page_soc); 1913142869Salc PMAP_LOCK(kernel_pmap); 1914108245Sjake vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE); 1915108245Sjake tp = tsb_kvtotte(vsrc); 1916108245Sjake tp->tte_data = 1917108245Sjake TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W; 1918108245Sjake tp->tte_vpn = TV_VPN(vsrc, TS_8K); 1919108245Sjake ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, 1920108245Sjake PAGE_SIZE); 1921108245Sjake tlb_page_demap(kernel_pmap, vsrc); 1922142869Salc PMAP_UNLOCK(kernel_pmap); 1923108245Sjake } 1924108245Sjake } else { 1925108700Sjake PMAP_STATS_INC(pmap_ncopy_page_oc); 1926142869Salc PMAP_LOCK(kernel_pmap); 1927108245Sjake vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE); 1928108245Sjake tp = tsb_kvtotte(vdst); 1929108245Sjake tp->tte_data = 1930108245Sjake TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W; 1931108245Sjake tp->tte_vpn = TV_VPN(vdst, TS_8K); 1932108245Sjake vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE); 1933108245Sjake tp = tsb_kvtotte(vsrc); 1934108245Sjake tp->tte_data = 1935108245Sjake TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W; 1936108245Sjake tp->tte_vpn = TV_VPN(vsrc, TS_8K); 1937113166Sjake cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE); 1938108245Sjake tlb_page_demap(kernel_pmap, vdst); 1939108245Sjake tlb_page_demap(kernel_pmap, vsrc); 1940142869Salc PMAP_UNLOCK(kernel_pmap); 1941108245Sjake } 194280708Sjake} 194380708Sjake 1944248508Skibint unmapped_buf_allowed; 1945248508Skib 1946248280Skibvoid 1947248280Skibpmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 1948248280Skib vm_offset_t b_offset, int xfersize) 1949248280Skib{ 1950248280Skib 1951248280Skib panic("pmap_copy_pages: not implemented"); 1952248280Skib} 1953248280Skib 195482903Sjake/* 195591403Ssilby * Returns true if the pmap's pv is one of the first 195691403Ssilby * 16 pvs linked to from this page. This count may 195791403Ssilby * be changed upwards or downwards in the future; it 195891403Ssilby * is only necessary that true be returned for a small 195991403Ssilby * subset of pmaps for proper page aging. 196088647Sjake */ 196188647Sjakeboolean_t 196291403Ssilbypmap_page_exists_quick(pmap_t pm, vm_page_t m) 196388647Sjake{ 196497447Sjake struct tte *tp; 196597447Sjake int loops; 1966208990Salc boolean_t rv; 196785241Sjake 1968224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1969208990Salc ("pmap_page_exists_quick: page %p is not managed", m)); 197097447Sjake loops = 0; 1971208990Salc rv = FALSE; 1972236214Salc rw_wlock(&tte_list_global_lock); 1973108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 197497447Sjake if ((tp->tte_data & TD_PV) == 0) 197597447Sjake continue; 1976208990Salc if (TTE_GET_PMAP(tp) == pm) { 1977208990Salc rv = TRUE; 1978208990Salc break; 1979208990Salc } 198097447Sjake if (++loops >= 16) 198197447Sjake break; 198297447Sjake } 1983236214Salc rw_wunlock(&tte_list_global_lock); 1984208990Salc return (rv); 198582903Sjake} 198682903Sjake 198788647Sjake/* 1988173708Salc * Return the number of managed mappings to the given physical page 1989173708Salc * that are wired. 1990173708Salc */ 1991173708Salcint 1992173708Salcpmap_page_wired_mappings(vm_page_t m) 1993173708Salc{ 1994173708Salc struct tte *tp; 1995173708Salc int count; 1996173708Salc 1997173708Salc count = 0; 1998224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 1999173708Salc return (count); 2000236214Salc rw_wlock(&tte_list_global_lock); 2001173708Salc TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) 2002173708Salc if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED)) 2003173708Salc count++; 2004236214Salc rw_wunlock(&tte_list_global_lock); 2005173708Salc return (count); 2006173708Salc} 2007173708Salc 2008173708Salc/* 200988647Sjake * Remove all pages from specified address space, this aids process exit 2010223800Smarius * speeds. This is much faster than pmap_remove in the case of running down 201188647Sjake * an entire address space. Only works for the current pmap. 201288647Sjake */ 201382903Sjakevoid 2014157443Speterpmap_remove_pages(pmap_t pm) 201580708Sjake{ 2016181701Smarius 201780708Sjake} 201880708Sjake 201988647Sjake/* 2020129068Salc * Returns TRUE if the given page has a managed mapping. 2021129068Salc */ 2022129068Salcboolean_t 2023129068Salcpmap_page_is_mapped(vm_page_t m) 2024129068Salc{ 2025129068Salc struct tte *tp; 2026207702Salc boolean_t rv; 2027129068Salc 2028207702Salc rv = FALSE; 2029224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 2030207702Salc return (rv); 2031236214Salc rw_wlock(&tte_list_global_lock); 2032203839Smarius TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) 2033207702Salc if ((tp->tte_data & TD_PV) != 0) { 2034207702Salc rv = TRUE; 2035207702Salc break; 2036207702Salc } 2037236214Salc rw_wunlock(&tte_list_global_lock); 2038207702Salc return (rv); 2039129068Salc} 2040129068Salc 2041129068Salc/* 2042181701Smarius * Return a count of reference bits for a page, clearing those bits. 2043181701Smarius * It is not necessary for every reference bit to be cleared, but it 2044181701Smarius * is necessary that 0 only be returned when there are truly no 2045181701Smarius * reference bits set. 204691403Ssilby * 2047181701Smarius * XXX: The exact number of bits to check and clear is a matter that 2048181701Smarius * should be tested and standardized at some point in the future for 2049181701Smarius * optimal aging of shared pages. 205091403Ssilby */ 205180709Sjakeint 205288647Sjakepmap_ts_referenced(vm_page_t m) 205380708Sjake{ 205497447Sjake struct tte *tpf; 205597447Sjake struct tte *tpn; 205697447Sjake struct tte *tp; 2057108166Sjake u_long data; 205897447Sjake int count; 205988647Sjake 2060224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2061208990Salc ("pmap_ts_referenced: page %p is not managed", m)); 206297447Sjake count = 0; 2063236214Salc rw_wlock(&tte_list_global_lock); 2064108166Sjake if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) { 206597447Sjake tpf = tp; 206697447Sjake do { 2067108166Sjake tpn = TAILQ_NEXT(tp, tte_link); 2068108166Sjake TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 2069108166Sjake TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); 2070159031Salc if ((tp->tte_data & TD_PV) == 0) 207197447Sjake continue; 2072108166Sjake data = atomic_clear_long(&tp->tte_data, TD_REF); 2073108166Sjake if ((data & TD_REF) != 0 && ++count > 4) 2074108166Sjake break; 207597447Sjake } while ((tp = tpn) != NULL && tp != tpf); 207697447Sjake } 2077236214Salc rw_wunlock(&tte_list_global_lock); 207897447Sjake return (count); 207980708Sjake} 208080708Sjake 208188647Sjakeboolean_t 208288647Sjakepmap_is_modified(vm_page_t m) 208380708Sjake{ 208497447Sjake struct tte *tp; 2085208504Salc boolean_t rv; 208688647Sjake 2087224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2088208504Salc ("pmap_is_modified: page %p is not managed", m)); 2089208504Salc rv = FALSE; 2090208504Salc 2091208504Salc /* 2092254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2093225418Skib * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2094208504Salc * is clear, no TTEs can have TD_W set. 2095208504Salc */ 2096248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2097254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2098208504Salc return (rv); 2099236214Salc rw_wlock(&tte_list_global_lock); 2100108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 2101159031Salc if ((tp->tte_data & TD_PV) == 0) 210297447Sjake continue; 2103208504Salc if ((tp->tte_data & TD_W) != 0) { 2104208504Salc rv = TRUE; 2105208504Salc break; 2106208504Salc } 210797447Sjake } 2108236214Salc rw_wunlock(&tte_list_global_lock); 2109208504Salc return (rv); 211080708Sjake} 211180708Sjake 2112120722Salc/* 2113120722Salc * pmap_is_prefaultable: 2114120722Salc * 2115120722Salc * Return whether or not the specified virtual address is elgible 2116120722Salc * for prefault. 2117120722Salc */ 2118120722Salcboolean_t 2119120722Salcpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2120120722Salc{ 2121214879Smarius boolean_t rv; 2122120722Salc 2123214879Smarius PMAP_LOCK(pmap); 2124214879Smarius rv = tsb_tte_lookup(pmap, addr) == NULL; 2125214879Smarius PMAP_UNLOCK(pmap); 2126214879Smarius return (rv); 2127120722Salc} 2128120722Salc 2129207155Salc/* 2130207155Salc * Return whether or not the specified physical page was referenced 2131207155Salc * in any physical maps. 2132207155Salc */ 2133207155Salcboolean_t 2134207155Salcpmap_is_referenced(vm_page_t m) 2135207155Salc{ 2136207155Salc struct tte *tp; 2137208574Salc boolean_t rv; 2138207155Salc 2139224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2140208574Salc ("pmap_is_referenced: page %p is not managed", m)); 2141208574Salc rv = FALSE; 2142236214Salc rw_wlock(&tte_list_global_lock); 2143207155Salc TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 2144207155Salc if ((tp->tte_data & TD_PV) == 0) 2145207155Salc continue; 2146208574Salc if ((tp->tte_data & TD_REF) != 0) { 2147208574Salc rv = TRUE; 2148208574Salc break; 2149208574Salc } 2150207155Salc } 2151236214Salc rw_wunlock(&tte_list_global_lock); 2152208574Salc return (rv); 2153207155Salc} 2154207155Salc 2155255028Salc/* 2156255028Salc * This function is advisory. 2157255028Salc */ 215888647Sjakevoid 2159255028Salcpmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 2160255028Salc{ 2161255028Salc} 2162255028Salc 2163255028Salcvoid 216488647Sjakepmap_clear_modify(vm_page_t m) 216580708Sjake{ 216697447Sjake struct tte *tp; 2167108166Sjake u_long data; 216884183Sjake 2169224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2170208504Salc ("pmap_clear_modify: page %p is not managed", m)); 2171248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2172254138Sattilio KASSERT(!vm_page_xbusied(m), 2173254138Sattilio ("pmap_clear_modify: page %p is exclusive busied", m)); 2174208504Salc 2175208504Salc /* 2176225418Skib * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set. 2177208504Salc * If the object containing the page is locked and the page is not 2178254138Sattilio * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 2179208504Salc */ 2180225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 218188647Sjake return; 2182236214Salc rw_wlock(&tte_list_global_lock); 2183108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 218497447Sjake if ((tp->tte_data & TD_PV) == 0) 218597447Sjake continue; 2186108166Sjake data = atomic_clear_long(&tp->tte_data, TD_W); 2187108166Sjake if ((data & TD_W) != 0) 2188100718Sjake tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 218997447Sjake } 2190236214Salc rw_wunlock(&tte_list_global_lock); 219180708Sjake} 219280708Sjake 219380708Sjakevoid 2194160889Salcpmap_remove_write(vm_page_t m) 219597447Sjake{ 219697447Sjake struct tte *tp; 2197108166Sjake u_long data; 219897447Sjake 2199224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2200208175Salc ("pmap_remove_write: page %p is not managed", m)); 2201208175Salc 2202208175Salc /* 2203254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2204254138Sattilio * set by another thread while the object is locked. Thus, 2205254138Sattilio * if PGA_WRITEABLE is clear, no page table entries need updating. 2206208175Salc */ 2207248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2208254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 220997447Sjake return; 2210236214Salc rw_wlock(&tte_list_global_lock); 2211108166Sjake TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 221297447Sjake if ((tp->tte_data & TD_PV) == 0) 221397447Sjake continue; 2214108166Sjake data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W); 2215108166Sjake if ((data & TD_W) != 0) { 2216159031Salc vm_page_dirty(m); 2217100718Sjake tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 221897447Sjake } 221997447Sjake } 2220225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 2221236214Salc rw_wunlock(&tte_list_global_lock); 222297447Sjake} 222397447Sjake 222488647Sjakeint 2225208504Salcpmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa) 222688647Sjake{ 2227176994Smarius 2228104271Sjake /* TODO; */ 222988647Sjake return (0); 223088647Sjake} 223188647Sjake 223288647Sjake/* 223388647Sjake * Activate a user pmap. The pmap must be activated before its address space 223488647Sjake * can be accessed in any way. 223588647Sjake */ 223680708Sjakevoid 223788647Sjakepmap_activate(struct thread *td) 223880708Sjake{ 223991613Sjake struct vmspace *vm; 2240113453Sjake struct pmap *pm; 2241113453Sjake int context; 224285241Sjake 2243223347Smarius critical_enter(); 224491613Sjake vm = td->td_proc->p_vmspace; 2245113453Sjake pm = vmspace_pmap(vm); 224685241Sjake 2247113453Sjake context = PCPU_GET(tlb_ctx); 2248113453Sjake if (context == PCPU_GET(tlb_ctx_max)) { 2249113453Sjake tlb_flush_user(); 2250113453Sjake context = PCPU_GET(tlb_ctx_min); 2251113453Sjake } 2252113453Sjake PCPU_SET(tlb_ctx, context + 1); 2253113453Sjake 2254181701Smarius pm->pm_context[curcpu] = context; 2255226054Smarius#ifdef SMP 2256226054Smarius CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active); 2257253994Smarius atomic_store_acq_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm); 2258226054Smarius#else 2259223346Smarius CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 2260129749Stmm PCPU_SET(pmap, pm); 2261226054Smarius#endif 2262113453Sjake 2263113453Sjake stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb); 2264113453Sjake stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb); 2265182878Smarius stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) & 2266205258Smarius TLB_CXR_PGSZ_MASK) | context); 2267182877Smarius flush(KERNBASE); 2268223347Smarius critical_exit(); 226980708Sjake} 227080708Sjake 2271198341Smarcelvoid 2272198341Smarcelpmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 2273198341Smarcel{ 2274203839Smarius 2275198341Smarcel} 2276198341Smarcel 2277178893Salc/* 2278181701Smarius * Increase the starting virtual address of the given mapping if a 2279181701Smarius * different alignment might result in more superpage mappings. 2280178893Salc */ 2281178893Salcvoid 2282178893Salcpmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 2283178893Salc vm_offset_t *addr, vm_size_t size) 2284178893Salc{ 2285181701Smarius 2286178893Salc} 2287