mmu_oea64.c revision 204268
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 204268 2010-02-24 00:54:37Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define TODO	panic("%s: not implemented", __func__);

static __inline u_int32_t
cntlzw(volatile u_int32_t a) {
	u_int32_t b;
	__asm ("cntlzw %0, %1" : "=r"(b) : "r"(a));
	return b;
}

static __inline uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode.  What a pain.
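 *
 * A note on the asm in TLBIE() below: "insrdi scratch,1,1,0" inserts a 1
 * into the most-significant bit of the saved MSR copy, i.e. it sets MSR[SF]
 * and flips the CPU into 64-bit mode just for the tlbie sequence; the
 * original MSR is restored with the second mtmsrd.  The spin lock around
 * the sequence keeps invalidations serialized, since only one tlbie may be
 * outstanding at a time.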
 */
struct mtx	tlbie_mutex;

static __inline void
TLBIE(pmap_t pmap, vm_offset_t va) {
	uint64_t vpn;
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch;

	vpn = (uint64_t)(va & ADDR_PIDX);
	if (pmap != NULL)
		vpn |= (va_to_vsid(pmap,va) << 28);
	vpn &= ~(0xffffULL << 48);

	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	mtx_lock_spin(&tlbie_mutex);
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; \
	    ptesync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	mtx_unlock_spin(&tlbie_mutex);
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
#define ENABLE_TRANS(msr)	mtmsr(msr); isync()

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define PVO_FAKE		0x100UL		/* fictitious phys page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	MOEA_PVO_CHECK(pvo)

#define LOCK_TABLE()	mtx_lock(&moea64_table_mutex)
#define UNLOCK_TABLE()	mtx_unlock(&moea64_table_mutex);
#define ASSERT_TABLE_LOCK()	mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa_hi;
	vm_offset_t	om_pa_lo;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
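 * regions[] holds the usable-RAM ranges reported by the firmware and is
 * what populates phys_avail[]; pregions[] covers all physical memory and
 * is consulted by moea64_calc_wimg() when deciding whether a mapping can
 * be cacheable.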
2641573Srgrimes */ 2651573Srgrimesstatic struct mem_region *regions; 266static struct mem_region *pregions; 267extern u_int phys_avail_count; 268extern int regions_sz, pregions_sz; 269extern int ofw_real_mode; 270 271extern struct pmap ofw_pmap; 272 273extern void bs_remap_earlyboot(void); 274 275 276/* 277 * Lock for the pteg and pvo tables. 278 */ 279struct mtx moea64_table_mutex; 280 281/* 282 * PTEG data. 283 */ 284static struct lpteg *moea64_pteg_table; 285u_int moea64_pteg_count; 286u_int moea64_pteg_mask; 287 288/* 289 * PVO data. 290 */ 291struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 292/* lists of unmanaged pages */ 293struct pvo_head moea64_pvo_kunmanaged = 294 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 295struct pvo_head moea64_pvo_unmanaged = 296 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 297 298uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 299uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 300 301#define BPVO_POOL_SIZE 327680 302static struct pvo_entry *moea64_bpvo_pool; 303static int moea64_bpvo_pool_index = 0; 304 305#define VSID_NBPW (sizeof(u_int32_t) * 8) 306static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW]; 307 308static boolean_t moea64_initialized = FALSE; 309 310/* 311 * Statistics. 312 */ 313u_int moea64_pte_valid = 0; 314u_int moea64_pte_overflow = 0; 315u_int moea64_pvo_entries = 0; 316u_int moea64_pvo_enter_calls = 0; 317u_int moea64_pvo_remove_calls = 0; 318SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 319 &moea64_pte_valid, 0, ""); 320SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 321 &moea64_pte_overflow, 0, ""); 322SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 323 &moea64_pvo_entries, 0, ""); 324SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 325 &moea64_pvo_enter_calls, 0, ""); 326SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 327 &moea64_pvo_remove_calls, 0, ""); 328 329vm_offset_t moea64_scratchpage_va[2]; 330struct pvo_entry *moea64_scratchpage_pvo[2]; 331struct lpte *moea64_scratchpage_pte[2]; 332struct mtx moea64_scratchpage_mtx; 333 334/* 335 * Allocate physical memory for use in moea64_bootstrap. 336 */ 337static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 338 339/* 340 * PTE calls. 341 */ 342static int moea64_pte_insert(u_int, struct lpte *); 343 344/* 345 * PVO calls. 346 */ 347static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 348 vm_offset_t, vm_offset_t, uint64_t, int); 349static void moea64_pvo_remove(struct pvo_entry *, int); 350static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *); 351static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int); 352 353/* 354 * Utility routines. 
355 */ 356static void moea64_bridge_bootstrap(mmu_t mmup, 357 vm_offset_t kernelstart, vm_offset_t kernelend); 358static void moea64_bridge_cpu_bootstrap(mmu_t, int ap); 359static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 360 vm_prot_t, boolean_t); 361static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 362static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *); 363static void moea64_kremove(mmu_t, vm_offset_t); 364static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 365 vm_offset_t pa, vm_size_t sz); 366static void tlbia(void); 367 368/* 369 * Kernel MMU interface 370 */ 371void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 372void moea64_clear_modify(mmu_t, vm_page_t); 373void moea64_clear_reference(mmu_t, vm_page_t); 374void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 375void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 376void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 377 vm_prot_t); 378void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 379vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 380vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 381void moea64_init(mmu_t); 382boolean_t moea64_is_modified(mmu_t, vm_page_t); 383boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 384vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 385boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 386int moea64_page_wired_mappings(mmu_t, vm_page_t); 387void moea64_pinit(mmu_t, pmap_t); 388void moea64_pinit0(mmu_t, pmap_t); 389void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 390void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 391void moea64_qremove(mmu_t, vm_offset_t, int); 392void moea64_release(mmu_t, pmap_t); 393void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 394void moea64_remove_all(mmu_t, vm_page_t); 395void moea64_remove_write(mmu_t, vm_page_t); 396void moea64_zero_page(mmu_t, vm_page_t); 397void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 398void moea64_zero_page_idle(mmu_t, vm_page_t); 399void moea64_activate(mmu_t, struct thread *); 400void moea64_deactivate(mmu_t, struct thread *); 401void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 402void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 403vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 404void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 405boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 406static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 407 408static mmu_method_t moea64_bridge_methods[] = { 409 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 410 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 411 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 412 MMUMETHOD(mmu_copy_page, moea64_copy_page), 413 MMUMETHOD(mmu_enter, moea64_enter), 414 MMUMETHOD(mmu_enter_object, moea64_enter_object), 415 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 416 MMUMETHOD(mmu_extract, moea64_extract), 417 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 418 MMUMETHOD(mmu_init, moea64_init), 419 MMUMETHOD(mmu_is_modified, moea64_is_modified), 420 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 421 MMUMETHOD(mmu_map, moea64_map), 422 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 423 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 424 MMUMETHOD(mmu_pinit, moea64_pinit), 425 MMUMETHOD(mmu_pinit0, moea64_pinit0), 426 
MMUMETHOD(mmu_protect, moea64_protect), 427 MMUMETHOD(mmu_qenter, moea64_qenter), 428 MMUMETHOD(mmu_qremove, moea64_qremove), 429 MMUMETHOD(mmu_release, moea64_release), 430 MMUMETHOD(mmu_remove, moea64_remove), 431 MMUMETHOD(mmu_remove_all, moea64_remove_all), 432 MMUMETHOD(mmu_remove_write, moea64_remove_write), 433 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 434 MMUMETHOD(mmu_zero_page, moea64_zero_page), 435 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 436 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 437 MMUMETHOD(mmu_activate, moea64_activate), 438 MMUMETHOD(mmu_deactivate, moea64_deactivate), 439 440 /* Internal interfaces */ 441 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap), 442 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap), 443 MMUMETHOD(mmu_mapdev, moea64_mapdev), 444 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 445 MMUMETHOD(mmu_kextract, moea64_kextract), 446 MMUMETHOD(mmu_kenter, moea64_kenter), 447 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 448 449 { 0, 0 } 450}; 451 452static mmu_def_t oea64_bridge_mmu = { 453 MMU_TYPE_G5, 454 moea64_bridge_methods, 455 0 456}; 457MMU_DEF(oea64_bridge_mmu); 458 459static __inline u_int 460va_to_pteg(uint64_t vsid, vm_offset_t addr) 461{ 462 uint64_t hash; 463 464 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 465 ADDR_PIDX_SHFT); 466 return (hash & moea64_pteg_mask); 467} 468 469static __inline struct pvo_head * 470pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 471{ 472 struct vm_page *pg; 473 474 pg = PHYS_TO_VM_PAGE(pa); 475 476 if (pg_p != NULL) 477 *pg_p = pg; 478 479 if (pg == NULL) 480 return (&moea64_pvo_unmanaged); 481 482 return (&pg->md.mdpg_pvoh); 483} 484 485static __inline struct pvo_head * 486vm_page_to_pvoh(vm_page_t m) 487{ 488 489 return (&m->md.mdpg_pvoh); 490} 491 492static __inline void 493moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 494{ 495 496 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 497 m->md.mdpg_attrs &= ~ptebit; 498} 499 500static __inline u_int64_t 501moea64_attr_fetch(vm_page_t m) 502{ 503 504 return (m->md.mdpg_attrs); 505} 506 507static __inline void 508moea64_attr_save(vm_page_t m, u_int64_t ptebit) 509{ 510 511 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 512 m->md.mdpg_attrs |= ptebit; 513} 514 515static __inline void 516moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 517 uint64_t pte_lo) 518{ 519 ASSERT_TABLE_LOCK(); 520 521 /* 522 * Construct a PTE. Default to IMB initially. Valid bit only gets 523 * set when the real pte is set in memory. 524 * 525 * Note: Don't set the valid bit for correct operation of tlb update. 526 */ 527 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 528 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 529 530 pt->pte_lo = pte_lo; 531} 532 533static __inline void 534moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 535{ 536 537 ASSERT_TABLE_LOCK(); 538 539 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 540} 541 542static __inline void 543moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit) 544{ 545 ASSERT_TABLE_LOCK(); 546 547 /* 548 * As shown in Section 7.6.3.2.3 549 */ 550 pt->pte_lo &= ~ptebit; 551 TLBIE(pmap,va); 552} 553 554static __inline void 555moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 556{ 557 558 ASSERT_TABLE_LOCK(); 559 pvo_pt->pte_hi |= LPTE_VALID; 560 561 /* 562 * Update the PTE as defined in section 7.6.3.1. 
563 * Note that the REF/CHG bits are from pvo_pt and thus should have 564 * been saved so this routine can restore them (if desired). 565 */ 566 pt->pte_lo = pvo_pt->pte_lo; 567 EIEIO(); 568 pt->pte_hi = pvo_pt->pte_hi; 569 PTESYNC(); 570 moea64_pte_valid++; 571} 572 573static __inline void 574moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 575{ 576 ASSERT_TABLE_LOCK(); 577 pvo_pt->pte_hi &= ~LPTE_VALID; 578 579 /* 580 * Force the reg & chg bits back into the PTEs. 581 */ 582 SYNC(); 583 584 /* 585 * Invalidate the pte. 586 */ 587 pt->pte_hi &= ~LPTE_VALID; 588 TLBIE(pmap,va); 589 590 /* 591 * Save the reg & chg bits. 592 */ 593 moea64_pte_synch(pt, pvo_pt); 594 moea64_pte_valid--; 595} 596 597static __inline void 598moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 599{ 600 601 /* 602 * Invalidate the PTE 603 */ 604 moea64_pte_unset(pt, pvo_pt, pmap, va); 605 moea64_pte_set(pt, pvo_pt); 606 if (pmap == kernel_pmap) 607 isync(); 608} 609 610static __inline uint64_t 611moea64_calc_wimg(vm_offset_t pa) 612{ 613 uint64_t pte_lo; 614 int i; 615 616 /* 617 * Assume the page is cache inhibited and access is guarded unless 618 * it's in our available memory array. 619 */ 620 pte_lo = LPTE_I | LPTE_G; 621 for (i = 0; i < pregions_sz; i++) { 622 if ((pa >= pregions[i].mr_start) && 623 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 624 pte_lo &= ~(LPTE_I | LPTE_G); 625 pte_lo |= LPTE_M; 626 break; 627 } 628 } 629 630 return pte_lo; 631} 632 633/* 634 * Quick sort callout for comparing memory regions. 635 */ 636static int mr_cmp(const void *a, const void *b); 637static int om_cmp(const void *a, const void *b); 638 639static int 640mr_cmp(const void *a, const void *b) 641{ 642 const struct mem_region *regiona; 643 const struct mem_region *regionb; 644 645 regiona = a; 646 regionb = b; 647 if (regiona->mr_start < regionb->mr_start) 648 return (-1); 649 else if (regiona->mr_start > regionb->mr_start) 650 return (1); 651 else 652 return (0); 653} 654 655static int 656om_cmp(const void *a, const void *b) 657{ 658 const struct ofw_map *mapa; 659 const struct ofw_map *mapb; 660 661 mapa = a; 662 mapb = b; 663 if (mapa->om_pa_hi < mapb->om_pa_hi) 664 return (-1); 665 else if (mapa->om_pa_hi > mapb->om_pa_hi) 666 return (1); 667 else if (mapa->om_pa_lo < mapb->om_pa_lo) 668 return (-1); 669 else if (mapa->om_pa_lo > mapb->om_pa_lo) 670 return (1); 671 else 672 return (0); 673} 674 675static void 676moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap) 677{ 678 int i = 0; 679 680 /* 681 * Initialize segment registers and MMU 682 */ 683 684 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 685 for (i = 0; i < 16; i++) { 686 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 687 } 688 __asm __volatile ("ptesync; mtsdr1 %0; isync" 689 :: "r"((u_int)moea64_pteg_table 690 | (32 - cntlzw(moea64_pteg_mask >> 11)))); 691 tlbia(); 692} 693 694static void 695moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 696{ 697 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 698 register_t msr; 699 vm_offset_t off; 700 vm_paddr_t pa_base; 701 int i, ofw_mappings; 702 703 bzero(translations, sz); 704 if (OF_getprop(mmu, "translations", translations, sz) == -1) 705 panic("moea64_bootstrap: can't get ofw translations"); 706 707 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 708 sz /= sizeof(*translations); 709 qsort(translations, sz, sizeof (*translations), om_cmp); 710 711 for (i = 0, ofw_mappings = 0; i < sz; i++) { 712 CTR3(KTR_PMAP, 
"translation: pa=%#x va=%#x len=%#x", 713 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 714 translations[i].om_len); 715 716 if (translations[i].om_pa_lo % PAGE_SIZE) 717 panic("OFW translation not page-aligned!"); 718 719 if (translations[i].om_pa_hi) 720 panic("OFW translations above 32-bit boundary!"); 721 722 pa_base = translations[i].om_pa_lo; 723 724 /* Now enter the pages for this mapping */ 725 726 DISABLE_TRANS(msr); 727 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 728 moea64_kenter(mmup, translations[i].om_va + off, 729 pa_base + off); 730 731 ofw_mappings++; 732 } 733 ENABLE_TRANS(msr); 734 } 735} 736 737static void 738moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 739{ 740 ihandle_t mmui; 741 phandle_t chosen; 742 phandle_t mmu; 743 size_t sz; 744 int i, j; 745 vm_size_t size, physsz, hwphyssz; 746 vm_offset_t pa, va, off; 747 register_t msr; 748 void *dpcpu; 749 750 /* We don't have a direct map since there is no BAT */ 751 hw_direct_map = 0; 752 753 /* Make sure battable is zero, since we have no BAT */ 754 for (i = 0; i < 16; i++) { 755 battable[i].batu = 0; 756 battable[i].batl = 0; 757 } 758 759 /* Get physical memory regions from firmware */ 760 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 761 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 762 763 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 764 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 765 panic("moea64_bootstrap: phys_avail too small"); 766 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 767 phys_avail_count = 0; 768 physsz = 0; 769 hwphyssz = 0; 770 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 771 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 772 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 773 regions[i].mr_start + regions[i].mr_size, 774 regions[i].mr_size); 775 if (hwphyssz != 0 && 776 (physsz + regions[i].mr_size) >= hwphyssz) { 777 if (physsz < hwphyssz) { 778 phys_avail[j] = regions[i].mr_start; 779 phys_avail[j + 1] = regions[i].mr_start + 780 hwphyssz - physsz; 781 physsz = hwphyssz; 782 phys_avail_count++; 783 } 784 break; 785 } 786 phys_avail[j] = regions[i].mr_start; 787 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 788 phys_avail_count++; 789 physsz += regions[i].mr_size; 790 } 791 physmem = btoc(physsz); 792 793 /* 794 * Allocate PTEG table. 795 */ 796#ifdef PTEGCOUNT 797 moea64_pteg_count = PTEGCOUNT; 798#else 799 moea64_pteg_count = 0x1000; 800 801 while (moea64_pteg_count < physmem) 802 moea64_pteg_count <<= 1; 803#endif /* PTEGCOUNT */ 804 805 size = moea64_pteg_count * sizeof(struct lpteg); 806 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 807 moea64_pteg_count, size); 808 809 /* 810 * We now need to allocate memory. This memory, to be allocated, 811 * has to reside in a page table. The page table we are about to 812 * allocate. We don't have BAT. So drop to data real mode for a minute 813 * as a measure of last resort. We do this a couple times. 814 */ 815 816 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size); 817 DISABLE_TRANS(msr); 818 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg)); 819 ENABLE_TRANS(msr); 820 821 moea64_pteg_mask = moea64_pteg_count - 1; 822 823 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 824 825 /* 826 * Allocate pv/overflow lists. 
827 */ 828 size = sizeof(struct pvo_head) * moea64_pteg_count; 829 830 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 831 PAGE_SIZE); 832 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 833 834 DISABLE_TRANS(msr); 835 for (i = 0; i < moea64_pteg_count; i++) 836 LIST_INIT(&moea64_pvo_table[i]); 837 ENABLE_TRANS(msr); 838 839 /* 840 * Initialize the lock that synchronizes access to the pteg and pvo 841 * tables. 842 */ 843 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 844 MTX_RECURSE); 845 846 /* 847 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU. 848 */ 849 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN); 850 851 /* 852 * Initialise the unmanaged pvo pool. 853 */ 854 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 855 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 856 moea64_bpvo_pool_index = 0; 857 858 /* 859 * Make sure kernel vsid is allocated as well as VSID 0. 860 */ 861 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 862 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 863 moea64_vsid_bitmap[0] |= 1; 864 865 /* 866 * Initialize the kernel pmap (which is statically allocated). 867 */ 868 for (i = 0; i < 16; i++) 869 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 870 871 kernel_pmap->pmap_phys = kernel_pmap; 872 kernel_pmap->pm_active = ~0; 873 874 PMAP_LOCK_INIT(kernel_pmap); 875 876 /* 877 * Now map in all the other buffers we allocated earlier 878 */ 879 880 DISABLE_TRANS(msr); 881 size = moea64_pteg_count * sizeof(struct lpteg); 882 off = (vm_offset_t)(moea64_pteg_table); 883 for (pa = off; pa < off + size; pa += PAGE_SIZE) 884 moea64_kenter(mmup, pa, pa); 885 size = sizeof(struct pvo_head) * moea64_pteg_count; 886 off = (vm_offset_t)(moea64_pvo_table); 887 for (pa = off; pa < off + size; pa += PAGE_SIZE) 888 moea64_kenter(mmup, pa, pa); 889 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 890 off = (vm_offset_t)(moea64_bpvo_pool); 891 for (pa = off; pa < off + size; pa += PAGE_SIZE) 892 moea64_kenter(mmup, pa, pa); 893 894 /* 895 * Map certain important things, like ourselves. 896 * 897 * NOTE: We do not map the exception vector space. That code is 898 * used only in real mode, and leaving it unmapped allows us to 899 * catch NULL pointer deferences, instead of making NULL a valid 900 * address. 901 */ 902 903 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE) 904 moea64_kenter(mmup, pa, pa); 905 ENABLE_TRANS(msr); 906 907 if (!ofw_real_mode) { 908 /* 909 * Set up the Open Firmware pmap and add its mappings. 910 */ 911 912 moea64_pinit(mmup, &ofw_pmap); 913 for (i = 0; i < 16; i++) 914 ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i]; 915 916 if ((chosen = OF_finddevice("/chosen")) == -1) 917 panic("moea64_bootstrap: can't find /chosen"); 918 OF_getprop(chosen, "mmu", &mmui, 4); 919 if ((mmu = OF_instance_to_package(mmui)) == -1) 920 panic("moea64_bootstrap: can't get mmu package"); 921 if ((sz = OF_getproplen(mmu, "translations")) == -1) 922 panic("moea64_bootstrap: can't get ofw translation count"); 923 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 924 panic("moea64_bootstrap: too many ofw translations"); 925 926 moea64_add_ofw_mappings(mmup, mmu, sz); 927 } 928 929#ifdef SMP 930 TLBSYNC(); 931#endif 932 933 /* 934 * Calculate the last available physical address. 
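	 * (phys_avail[] is scanned for its last populated pair and Maxmem is
	 * taken from the end of that range.)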
935 */ 936 for (i = 0; phys_avail[i + 2] != 0; i += 2) 937 ; 938 Maxmem = powerpc_btop(phys_avail[i + 1]); 939 940 /* 941 * Initialize MMU and remap early physical mappings 942 */ 943 moea64_bridge_cpu_bootstrap(mmup,0); 944 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 945 pmap_bootstrapped++; 946 bs_remap_earlyboot(); 947 948 /* 949 * Set the start and end of kva. 950 */ 951 virtual_avail = VM_MIN_KERNEL_ADDRESS; 952 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 953 954 /* 955 * Figure out how far we can extend virtual_end into segment 16 956 * without running into existing mappings. Segment 16 is guaranteed 957 * to contain neither RAM nor devices (at least on Apple hardware), 958 * but will generally contain some OFW mappings we should not 959 * step on. 960 */ 961 962 PMAP_LOCK(kernel_pmap); 963 while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL) 964 virtual_end += PAGE_SIZE; 965 PMAP_UNLOCK(kernel_pmap); 966 967 /* 968 * Allocate some things for page zeroing 969 */ 970 971 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); 972 for (i = 0; i < 2; i++) { 973 moea64_scratchpage_va[i] = virtual_avail; 974 virtual_avail += PAGE_SIZE; 975 976 moea64_kenter(mmup,moea64_scratchpage_va[i],kernelstart); 977 978 LOCK_TABLE(); 979 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap, 980 moea64_scratchpage_va[i],&j); 981 moea64_scratchpage_pte[i] = moea64_pvo_to_pte( 982 moea64_scratchpage_pvo[i],j); 983 moea64_scratchpage_pte[i]->pte_hi |= LPTE_LOCKED; 984 UNLOCK_TABLE(); 985 } 986 987 /* 988 * Allocate a kernel stack with a guard page for thread0 and map it 989 * into the kernel page map. 990 */ 991 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 992 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 993 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 994 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 995 thread0.td_kstack = va; 996 thread0.td_kstack_pages = KSTACK_PAGES; 997 for (i = 0; i < KSTACK_PAGES; i++) { 998 moea64_kenter(mmup, va, pa); 999 pa += PAGE_SIZE; 1000 va += PAGE_SIZE; 1001 } 1002 1003 /* 1004 * Allocate virtual address space for the message buffer. 1005 */ 1006 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1007 msgbufp = (struct msgbuf *)msgbuf_phys; 1008 while (pa - msgbuf_phys < MSGBUF_SIZE) { 1009 moea64_kenter(mmup, pa, pa); 1010 pa += PAGE_SIZE; 1011 } 1012 1013 /* 1014 * Allocate virtual address space for the dynamic percpu area. 1015 */ 1016 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1017 dpcpu = (void *)pa; 1018 while (pa - (vm_offset_t)dpcpu < DPCPU_SIZE) { 1019 moea64_kenter(mmup, pa, pa); 1020 pa += PAGE_SIZE; 1021 } 1022 dpcpu_init(dpcpu, 0); 1023} 1024 1025/* 1026 * Activate a user pmap. The pmap must be activated before it's address 1027 * space can be accessed in any way. 1028 */ 1029void 1030moea64_activate(mmu_t mmu, struct thread *td) 1031{ 1032 pmap_t pm, pmr; 1033 1034 /* 1035 * Load all the data we need up front to encourage the compiler to 1036 * not issue any loads while we have interrupts disabled below. 
1037 */ 1038 pm = &td->td_proc->p_vmspace->vm_pmap; 1039 pmr = pm->pmap_phys; 1040 1041 pm->pm_active |= PCPU_GET(cpumask); 1042 PCPU_SET(curpmap, pmr); 1043} 1044 1045void 1046moea64_deactivate(mmu_t mmu, struct thread *td) 1047{ 1048 pmap_t pm; 1049 1050 pm = &td->td_proc->p_vmspace->vm_pmap; 1051 pm->pm_active &= ~(PCPU_GET(cpumask)); 1052 PCPU_SET(curpmap, NULL); 1053} 1054 1055void 1056moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1057{ 1058 struct pvo_entry *pvo; 1059 1060 PMAP_LOCK(pm); 1061 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1062 1063 if (pvo != NULL) { 1064 if (wired) { 1065 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1066 pm->pm_stats.wired_count++; 1067 pvo->pvo_vaddr |= PVO_WIRED; 1068 } else { 1069 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1070 pm->pm_stats.wired_count--; 1071 pvo->pvo_vaddr &= ~PVO_WIRED; 1072 } 1073 } 1074 PMAP_UNLOCK(pm); 1075} 1076 1077/* 1078 * Zero a page of physical memory by temporarily mapping it into the tlb. 1079 */ 1080void 1081moea64_zero_page(mmu_t mmu, vm_page_t m) 1082{ 1083 moea64_zero_page_area(mmu,m,0,PAGE_SIZE); 1084} 1085 1086/* 1087 * This goes through and sets the physical address of our 1088 * special scratch PTE to the PA we want to zero or copy. Because 1089 * of locking issues (this can get called in pvo_enter() by 1090 * the UMA allocator), we can't use most other utility functions here 1091 */ 1092 1093static __inline 1094void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1095 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1096 1097 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1098 ~(LPTE_WIMG | LPTE_RPGN); 1099 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1100 moea64_calc_wimg(pa) | (uint64_t)pa; 1101 1102 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1103 TLBIE(kernel_pmap, moea64_scratchpage_va[which]); 1104 1105 moea64_scratchpage_pte[which]->pte_lo = 1106 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo; 1107 EIEIO(); 1108 1109 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1110 PTESYNC(); isync(); 1111} 1112 1113void 1114moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1115{ 1116 vm_offset_t dst; 1117 vm_offset_t src; 1118 1119 dst = VM_PAGE_TO_PHYS(mdst); 1120 src = VM_PAGE_TO_PHYS(msrc); 1121 1122 mtx_lock(&moea64_scratchpage_mtx); 1123 1124 moea64_set_scratchpage_pa(0,src); 1125 moea64_set_scratchpage_pa(1,dst); 1126 1127 kcopy((void *)moea64_scratchpage_va[0], 1128 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1129 1130 mtx_unlock(&moea64_scratchpage_mtx); 1131} 1132 1133void 1134moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1135{ 1136 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1137 1138 if (!moea64_initialized) 1139 panic("moea64_zero_page: can't zero pa %#x", pa); 1140 if (size + off > PAGE_SIZE) 1141 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1142 1143 mtx_lock(&moea64_scratchpage_mtx); 1144 1145 moea64_set_scratchpage_pa(0,pa); 1146 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1147 mtx_unlock(&moea64_scratchpage_mtx); 1148} 1149 1150void 1151moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1152{ 1153 1154 moea64_zero_page(mmu, m); 1155} 1156 1157/* 1158 * Map the given physical page at the specified virtual address in the 1159 * target pmap with the protection requested. If specified the page 1160 * will be wired down. 
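 *
 * This wrapper only takes the page queues and pmap locks; the actual work
 * is done in moea64_enter_locked() below.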
1161 */ 1162void 1163moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1164 vm_prot_t prot, boolean_t wired) 1165{ 1166 1167 vm_page_lock_queues(); 1168 PMAP_LOCK(pmap); 1169 moea64_enter_locked(pmap, va, m, prot, wired); 1170 vm_page_unlock_queues(); 1171 PMAP_UNLOCK(pmap); 1172} 1173 1174/* 1175 * Map the given physical page at the specified virtual address in the 1176 * target pmap with the protection requested. If specified the page 1177 * will be wired down. 1178 * 1179 * The page queues and pmap must be locked. 1180 */ 1181 1182static void 1183moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1184 boolean_t wired) 1185{ 1186 struct pvo_head *pvo_head; 1187 uma_zone_t zone; 1188 vm_page_t pg; 1189 uint64_t pte_lo; 1190 u_int pvo_flags; 1191 int error; 1192 1193 if (!moea64_initialized) { 1194 pvo_head = &moea64_pvo_kunmanaged; 1195 pg = NULL; 1196 zone = moea64_upvo_zone; 1197 pvo_flags = 0; 1198 } else { 1199 pvo_head = vm_page_to_pvoh(m); 1200 pg = m; 1201 zone = moea64_mpvo_zone; 1202 pvo_flags = PVO_MANAGED; 1203 } 1204 1205 if (pmap_bootstrapped) 1206 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1207 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1208 1209 /* XXX change the pvo head for fake pages */ 1210 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1211 pvo_flags &= ~PVO_MANAGED; 1212 pvo_head = &moea64_pvo_kunmanaged; 1213 zone = moea64_upvo_zone; 1214 } 1215 1216 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1217 1218 if (prot & VM_PROT_WRITE) { 1219 pte_lo |= LPTE_BW; 1220 if (pmap_bootstrapped) 1221 vm_page_flag_set(m, PG_WRITEABLE); 1222 } else 1223 pte_lo |= LPTE_BR; 1224 1225 if (prot & VM_PROT_EXECUTE) 1226 pvo_flags |= VM_PROT_EXECUTE; 1227 1228 if (wired) 1229 pvo_flags |= PVO_WIRED; 1230 1231 if ((m->flags & PG_FICTITIOUS) != 0) 1232 pvo_flags |= PVO_FAKE; 1233 1234 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1235 pte_lo, pvo_flags); 1236 1237 /* 1238 * Flush the page from the instruction cache if this page is 1239 * mapped executable and cacheable. 1240 */ 1241 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1242 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1243 } 1244} 1245 1246static void 1247moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1248{ 1249 1250 /* 1251 * This is much trickier than on older systems because 1252 * we can't sync the icache on physical addresses directly 1253 * without a direct map. Instead we check a couple of cases 1254 * where the memory is already mapped in and, failing that, 1255 * use the same trick we use for page zeroing to create 1256 * a temporary mapping for this physical address. 1257 */ 1258 1259 if (!pmap_bootstrapped) { 1260 /* 1261 * If PMAP is not bootstrapped, we are likely to be 1262 * in real mode. 1263 */ 1264 __syncicache((void *)pa, sz); 1265 } else if (pmap == kernel_pmap) { 1266 __syncicache((void *)va, sz); 1267 } else { 1268 /* Use the scratch page to set up a temp mapping */ 1269 1270 mtx_lock(&moea64_scratchpage_mtx); 1271 1272 moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1273 __syncicache((void *)(moea64_scratchpage_va[1] + 1274 (va & ADDR_POFF)), sz); 1275 1276 mtx_unlock(&moea64_scratchpage_mtx); 1277 } 1278} 1279 1280/* 1281 * Maps a sequence of resident pages belonging to the same object. 1282 * The sequence begins with the given page m_start. This page is 1283 * mapped at the given virtual address start. 
Each subsequent page is 1284 * mapped at a virtual address that is offset from start by the same 1285 * amount as the page is offset from m_start within the object. The 1286 * last page in the sequence is the page with the largest offset from 1287 * m_start that can be mapped at a virtual address less than the given 1288 * virtual address end. Not every virtual page between start and end 1289 * is mapped; only those for which a resident page exists with the 1290 * corresponding offset from m_start are mapped. 1291 */ 1292void 1293moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1294 vm_page_t m_start, vm_prot_t prot) 1295{ 1296 vm_page_t m; 1297 vm_pindex_t diff, psize; 1298 1299 psize = atop(end - start); 1300 m = m_start; 1301 PMAP_LOCK(pm); 1302 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1303 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1304 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1305 m = TAILQ_NEXT(m, listq); 1306 } 1307 PMAP_UNLOCK(pm); 1308} 1309 1310void 1311moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1312 vm_prot_t prot) 1313{ 1314 PMAP_LOCK(pm); 1315 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1316 FALSE); 1317 PMAP_UNLOCK(pm); 1318 1319} 1320 1321vm_paddr_t 1322moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1323{ 1324 struct pvo_entry *pvo; 1325 vm_paddr_t pa; 1326 1327 PMAP_LOCK(pm); 1328 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1329 if (pvo == NULL) 1330 pa = 0; 1331 else 1332 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1333 PMAP_UNLOCK(pm); 1334 return (pa); 1335} 1336 1337/* 1338 * Atomically extract and hold the physical page with the given 1339 * pmap and virtual address pair if that mapping permits the given 1340 * protection. 1341 */ 1342vm_page_t 1343moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1344{ 1345 struct pvo_entry *pvo; 1346 vm_page_t m; 1347 1348 m = NULL; 1349 vm_page_lock_queues(); 1350 PMAP_LOCK(pmap); 1351 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1352 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1353 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1354 (prot & VM_PROT_WRITE) == 0)) { 1355 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1356 vm_page_hold(m); 1357 } 1358 vm_page_unlock_queues(); 1359 PMAP_UNLOCK(pmap); 1360 return (m); 1361} 1362 1363static void * 1364moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1365{ 1366 /* 1367 * This entire routine is a horrible hack to avoid bothering kmem 1368 * for new KVA addresses. Because this can get called from inside 1369 * kmem allocation routines, calling kmem for a new address here 1370 * can lead to multiply locking non-recursive mutexes. 
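	 * Instead, the new page's physical address is used directly as its
	 * "virtual" address and a wired 1:1 mapping is entered for it with
	 * moea64_pvo_enter(), taking the kernel pmap lock only if it is not
	 * already held.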
1371 */ 1372 static vm_pindex_t color; 1373 vm_offset_t va; 1374 1375 vm_page_t m; 1376 int pflags, needed_lock; 1377 1378 *flags = UMA_SLAB_PRIV; 1379 needed_lock = !PMAP_LOCKED(kernel_pmap); 1380 1381 if (needed_lock) 1382 PMAP_LOCK(kernel_pmap); 1383 1384 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1385 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1386 else 1387 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1388 if (wait & M_ZERO) 1389 pflags |= VM_ALLOC_ZERO; 1390 1391 for (;;) { 1392 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1393 if (m == NULL) { 1394 if (wait & M_NOWAIT) 1395 return (NULL); 1396 VM_WAIT; 1397 } else 1398 break; 1399 } 1400 1401 va = VM_PAGE_TO_PHYS(m); 1402 1403 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1404 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1405 PVO_WIRED | PVO_BOOTSTRAP); 1406 1407 if (needed_lock) 1408 PMAP_UNLOCK(kernel_pmap); 1409 1410 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1411 bzero((void *)va, PAGE_SIZE); 1412 1413 return (void *)va; 1414} 1415 1416void 1417moea64_init(mmu_t mmu) 1418{ 1419 1420 CTR0(KTR_PMAP, "moea64_init"); 1421 1422 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1423 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1424 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1425 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1426 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1427 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1428 1429 if (!hw_direct_map) { 1430 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1431 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1432 } 1433 1434 moea64_initialized = TRUE; 1435} 1436 1437boolean_t 1438moea64_is_modified(mmu_t mmu, vm_page_t m) 1439{ 1440 1441 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1442 return (FALSE); 1443 1444 return (moea64_query_bit(m, LPTE_CHG)); 1445} 1446 1447void 1448moea64_clear_reference(mmu_t mmu, vm_page_t m) 1449{ 1450 1451 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1452 return; 1453 moea64_clear_bit(m, LPTE_REF, NULL); 1454} 1455 1456void 1457moea64_clear_modify(mmu_t mmu, vm_page_t m) 1458{ 1459 1460 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1461 return; 1462 moea64_clear_bit(m, LPTE_CHG, NULL); 1463} 1464 1465/* 1466 * Clear the write and modified bits in each of the given page's mappings. 
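 * Each writable mapping is downgraded to read-only (LPTE_BR) and any
 * accumulated change bit is pushed back to the vm_page via vm_page_dirty()
 * before PG_WRITEABLE is cleared.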
1467 */ 1468void 1469moea64_remove_write(mmu_t mmu, vm_page_t m) 1470{ 1471 struct pvo_entry *pvo; 1472 struct lpte *pt; 1473 pmap_t pmap; 1474 uint64_t lo; 1475 1476 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1477 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1478 (m->flags & PG_WRITEABLE) == 0) 1479 return; 1480 lo = moea64_attr_fetch(m); 1481 SYNC(); 1482 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1483 pmap = pvo->pvo_pmap; 1484 PMAP_LOCK(pmap); 1485 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1486 LOCK_TABLE(); 1487 pt = moea64_pvo_to_pte(pvo, -1); 1488 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1489 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1490 if (pt != NULL) { 1491 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1492 lo |= pvo->pvo_pte.lpte.pte_lo; 1493 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1494 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1495 pvo->pvo_pmap, PVO_VADDR(pvo)); 1496 } 1497 UNLOCK_TABLE(); 1498 } 1499 PMAP_UNLOCK(pmap); 1500 } 1501 if ((lo & LPTE_CHG) != 0) { 1502 moea64_attr_clear(m, LPTE_CHG); 1503 vm_page_dirty(m); 1504 } 1505 vm_page_flag_clear(m, PG_WRITEABLE); 1506} 1507 1508/* 1509 * moea64_ts_referenced: 1510 * 1511 * Return a count of reference bits for a page, clearing those bits. 1512 * It is not necessary for every reference bit to be cleared, but it 1513 * is necessary that 0 only be returned when there are truly no 1514 * reference bits set. 1515 * 1516 * XXX: The exact number of bits to check and clear is a matter that 1517 * should be tested and standardized at some point in the future for 1518 * optimal aging of shared pages. 1519 */ 1520boolean_t 1521moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1522{ 1523 int count; 1524 1525 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1526 return (0); 1527 1528 count = moea64_clear_bit(m, LPTE_REF, NULL); 1529 1530 return (count); 1531} 1532 1533/* 1534 * Map a wired page into kernel virtual address space. 1535 */ 1536void 1537moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1538{ 1539 uint64_t pte_lo; 1540 int error; 1541 1542#if 0 1543 if (!pmap_bootstrapped) { 1544 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1545 panic("Trying to enter an address in KVA -- %#x!\n",pa); 1546 } 1547#endif 1548 1549 pte_lo = moea64_calc_wimg(pa); 1550 1551 PMAP_LOCK(kernel_pmap); 1552 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1553 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1554 PVO_WIRED | VM_PROT_EXECUTE); 1555 1556 if (error != 0 && error != ENOENT) 1557 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va, 1558 pa, error); 1559 1560 /* 1561 * Flush the memory from the instruction cache. 1562 */ 1563 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1564 __syncicache((void *)va, PAGE_SIZE); 1565 } 1566 PMAP_UNLOCK(kernel_pmap); 1567} 1568 1569/* 1570 * Extract the physical page address associated with the given kernel virtual 1571 * address. 1572 */ 1573vm_offset_t 1574moea64_kextract(mmu_t mmu, vm_offset_t va) 1575{ 1576 struct pvo_entry *pvo; 1577 vm_paddr_t pa; 1578 1579 PMAP_LOCK(kernel_pmap); 1580 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1581 KASSERT(pvo != NULL, ("moea64_kextract: no addr found")); 1582 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1583 PMAP_UNLOCK(kernel_pmap); 1584 return (pa); 1585} 1586 1587/* 1588 * Remove a wired page from kernel virtual address space. 
1589 */ 1590void 1591moea64_kremove(mmu_t mmu, vm_offset_t va) 1592{ 1593 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1594} 1595 1596/* 1597 * Map a range of physical addresses into kernel virtual address space. 1598 * 1599 * The value passed in *virt is a suggested virtual address for the mapping. 1600 * Architectures which can support a direct-mapped physical to virtual region 1601 * can return the appropriate address within that region, leaving '*virt' 1602 * unchanged. We cannot and therefore do not; *virt is updated with the 1603 * first usable address after the mapped region. 1604 */ 1605vm_offset_t 1606moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1607 vm_offset_t pa_end, int prot) 1608{ 1609 vm_offset_t sva, va; 1610 1611 sva = *virt; 1612 va = sva; 1613 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1614 moea64_kenter(mmu, va, pa_start); 1615 *virt = va; 1616 1617 return (sva); 1618} 1619 1620/* 1621 * Returns true if the pmap's pv is one of the first 1622 * 16 pvs linked to from this page. This count may 1623 * be changed upwards or downwards in the future; it 1624 * is only necessary that true be returned for a small 1625 * subset of pmaps for proper page aging. 1626 */ 1627boolean_t 1628moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1629{ 1630 int loops; 1631 struct pvo_entry *pvo; 1632 1633 if (!moea64_initialized || (m->flags & PG_FICTITIOUS)) 1634 return FALSE; 1635 1636 loops = 0; 1637 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1638 if (pvo->pvo_pmap == pmap) 1639 return (TRUE); 1640 if (++loops >= 16) 1641 break; 1642 } 1643 1644 return (FALSE); 1645} 1646 1647/* 1648 * Return the number of managed mappings to the given physical page 1649 * that are wired. 1650 */ 1651int 1652moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1653{ 1654 struct pvo_entry *pvo; 1655 int count; 1656 1657 count = 0; 1658 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0) 1659 return (count); 1660 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1661 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1662 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1663 count++; 1664 return (count); 1665} 1666 1667static u_int moea64_vsidcontext; 1668 1669void 1670moea64_pinit(mmu_t mmu, pmap_t pmap) 1671{ 1672 int i, mask; 1673 u_int entropy; 1674 1675 PMAP_LOCK_INIT(pmap); 1676 1677 entropy = 0; 1678 __asm __volatile("mftb %0" : "=r"(entropy)); 1679 1680 if (pmap_bootstrapped) 1681 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); 1682 else 1683 pmap->pmap_phys = pmap; 1684 1685 /* 1686 * Allocate some segment registers for this pmap. 1687 */ 1688 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1689 u_int hash, n; 1690 1691 /* 1692 * Create a new value by mutiplying by a prime and adding in 1693 * entropy from the timebase register. This is to make the 1694 * VSID more random so that the PT hash function collides 1695 * less often. (Note that the prime casues gcc to do shifts 1696 * instead of a multiply.) 1697 */ 1698 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1699 hash = moea64_vsidcontext & (NPMAPS - 1); 1700 if (hash == 0) /* 0 is special, avoid it */ 1701 continue; 1702 n = hash >> 5; 1703 mask = 1 << (hash & (VSID_NBPW - 1)); 1704 hash = (moea64_vsidcontext & 0xfffff); 1705 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1706 /* anything free in this bucket? 
*/ 1707 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1708 entropy = (moea64_vsidcontext >> 20); 1709 continue; 1710 } 1711 i = ffs(~moea64_vsid_bitmap[i]) - 1; 1712 mask = 1 << i; 1713 hash &= 0xfffff & ~(VSID_NBPW - 1); 1714 hash |= i; 1715 } 1716 moea64_vsid_bitmap[n] |= mask; 1717 for (i = 0; i < 16; i++) { 1718 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1719 } 1720 return; 1721 } 1722 1723 panic("moea64_pinit: out of segments"); 1724} 1725 1726/* 1727 * Initialize the pmap associated with process 0. 1728 */ 1729void 1730moea64_pinit0(mmu_t mmu, pmap_t pm) 1731{ 1732 moea64_pinit(mmu, pm); 1733 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1734} 1735 1736/* 1737 * Set the physical protection on the specified range of this map as requested. 1738 */ 1739void 1740moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1741 vm_prot_t prot) 1742{ 1743 struct pvo_entry *pvo; 1744 struct lpte *pt; 1745 int pteidx; 1746 1747 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1748 eva, prot); 1749 1750 1751 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1752 ("moea64_protect: non current pmap")); 1753 1754 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1755 moea64_remove(mmu, pm, sva, eva); 1756 return; 1757 } 1758 1759 vm_page_lock_queues(); 1760 PMAP_LOCK(pm); 1761 for (; sva < eva; sva += PAGE_SIZE) { 1762 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1763 if (pvo == NULL) 1764 continue; 1765 1766 /* 1767 * Grab the PTE pointer before we diddle with the cached PTE 1768 * copy. 1769 */ 1770 LOCK_TABLE(); 1771 pt = moea64_pvo_to_pte(pvo, pteidx); 1772 1773 /* 1774 * Change the protection of the page. 1775 */ 1776 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1777 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1778 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1779 if ((prot & VM_PROT_EXECUTE) == 0) 1780 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1781 1782 /* 1783 * If the PVO is in the page table, update that pte as well. 1784 */ 1785 if (pt != NULL) { 1786 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1787 pvo->pvo_pmap, PVO_VADDR(pvo)); 1788 if ((pvo->pvo_pte.lpte.pte_lo & 1789 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1790 moea64_syncicache(pm, sva, 1791 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1792 PAGE_SIZE); 1793 } 1794 } 1795 UNLOCK_TABLE(); 1796 } 1797 vm_page_unlock_queues(); 1798 PMAP_UNLOCK(pm); 1799} 1800 1801/* 1802 * Map a list of wired pages into kernel virtual address space. This is 1803 * intended for temporary mappings which do not need page modification or 1804 * references recorded. Existing mappings in the region are overwritten. 1805 */ 1806void 1807moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 1808{ 1809 while (count-- > 0) { 1810 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1811 va += PAGE_SIZE; 1812 m++; 1813 } 1814} 1815 1816/* 1817 * Remove page mappings from kernel virtual address space. Intended for 1818 * temporary mappings entered by moea64_qenter. 

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{
	int	idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea64_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea64_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			moea64_pvo_remove(pvo, pteidx);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
		moea64_attr_clear(m, LPTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}
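
/*
 * Worked illustration of the phys_avail[] bookkeeping performed by
 * moea64_bootstrap_alloc() below (hypothetical numbers, chosen only to make
 * the three cases concrete): given a region phys_avail[i] = 0x100000,
 * phys_avail[i + 1] = 0x200000 and an aligned request of size 0x4000,
 *  - an allocation at the front (s == phys_avail[i]) just advances the
 *    region start to 0x104000;
 *  - an allocation at the back (e == phys_avail[i + 1]) just lowers the
 *    region end to 0x1fc000;
 *  - an allocation from the middle splits the region in two, shifting the
 *    remaining pairs up and incrementing phys_avail_count.
 */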

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

static void
tlbia(void)
{
	vm_offset_t i;
	register_t msr, scratch;

	for (i = 0; i < 0xFF000; i += 0x00001000) {
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    ptesync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    eieio; \
		    tlbsync; \
		    ptesync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
	}
}

static int
moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
{
	struct	 pvo_entry *pvo;
	uint64_t vsid;
	int	 first;
	u_int	 ptegidx;
	int	 i;
	int	 bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	moea64_pvo_enter_calls++;
	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
			    (pte_lo & LPTE_PP)) {
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table.  The mapping we are working with is already
		 * protected by the PMAP lock.
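		 *
		 * (Added note, an illustrative reading of the above rather
		 *  than a trace of a specific call path: uma_zalloc() may
		 *  itself need to map a fresh page, which comes back through
		 *  this module and touches the page table; dropping the
		 *  table lock here keeps that re-entry from running with the
		 *  lock already held, while the PMAP lock continues to keep
		 *  the mapping we are building consistent.)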
		 */
		UNLOCK_TABLE();
		pvo = uma_zalloc(zone, M_NOWAIT);
		LOCK_TABLE();
	}

	if (pvo == NULL) {
		UNLOCK_TABLE();
		return (ENOMEM);
	}

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (!(flags & VM_PROT_EXECUTE))
		pte_lo |= LPTE_NOEXEC;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea64_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

	UNLOCK_TABLE();

	return (first ? ENOENT : 0);
}

static void
moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct	lpte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	LOCK_TABLE();
	pt = moea64_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
		    PVO_VADDR(pvo));
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}
	UNLOCK_TABLE();

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_REF | LPTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;
}

static __inline int
moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
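	 *
	 * Concretely (reading straight off the expression below): each PTEG
	 * holds 8 PTEs, so the flat index into moea64_pteg_table is the
	 * group index shifted left by 3 plus the slot number that
	 * PVO_PTEGIDX_SET() stashed in pvo_vaddr; when LPTE_HID is set the
	 * entry was installed in the secondary group, whose index is the
	 * primary one XORed with moea64_pteg_mask.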
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct		pvo_entry *pvo;
	int		ptegidx;
	uint64_t	vsid;

	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va);

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea64_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	UNLOCK_TABLE();

	return (pvo);
}

static struct lpte *
moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct lpte	*pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int		ptegidx;
		uint64_t	vsid;

		vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo));
		ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo));
		pteidx = moea64_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		ASSERT_TABLE_LOCK();
		return (pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}

static int
moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	int	i;

	ASSERT_TABLE_LOCK();

	/*
	 * First try primary hash.
	 */
	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & LPTE_VALID) == 0 &&
		    (pt->pte_hi & LPTE_LOCKED) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
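	 *
	 * (Background, standard behaviour of the hashed page table rather
	 * than anything specific to this file: the secondary PTEG index is
	 * the primary index XORed with moea64_pteg_mask, and LPTE_HID is set
	 * in the PTE so both the hardware and moea64_pvo_pte_index() know
	 * which of the two groups to probe.)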
	 */
	ptegidx ^= moea64_pteg_mask;

	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & LPTE_VALID) == 0 &&
		    (pt->pte_hi & LPTE_LOCKED) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea64_pte_insert: overflow");
	return (-1);
}

static boolean_t
moea64_query_bit(vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	if (moea64_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			moea64_attr_save(m, ptebit);
			MOEA_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				MOEA_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	return (FALSE);
}

static u_int
moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	struct	lpte *pt;
	uint64_t rv;

	/*
	 * Clear the cached value.
	 */
	rv = moea64_attr_fetch(m);
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				moea64_pte_clear(pt, pvo->pvo_pmap,
				    PVO_VADDR(pvo), ptebit);
			}
		}
		UNLOCK_TABLE();
		rv |= pvo->pvo_pte.lpte.pte_lo;
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		MOEA_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	return (EFAULT);
}
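
/*
 * Usage sketch for the device-mapping routines below (illustrative; the
 * pmap_mapdev()/pmap_unmapdev() entry points named here are the usual
 * machine-independent way in, not something this file depends on): a bus
 * driver asks for a KVA window onto a device's register space with
 * pmap_mapdev(pa, size) and gives it back with pmap_unmapdev().  The pointer
 * returned by moea64_mapdev() already includes the sub-page offset of 'pa',
 * so it can be used directly.
 */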

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter(mmu, tmpva, ppa);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}