mmu_oea64.c revision 209975
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 209975 2010-07-13 05:32:19Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define TODO	panic("%s: not implemented", __func__);
void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

static __inline register_t
cntlzd(volatile register_t a) {
	register_t b;
	__asm ("cntlzd %0, %1" : "=r"(b) : "r"(a));
	return b;
}

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode. What a pain.
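 *
 * The TLBIE() wrapper below brackets the tlbie with ptesync/eieio/tlbsync
 * as the architecture requires, and takes tlbie_mutex around the whole
 * sequence: only one CPU may have a tlbie in flight at a time (see the
 * mutex initialization in moea64_bootstrap()).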
183 */ 184struct mtx tlbie_mutex; 185 186static __inline void 187TLBIE(uint64_t vpn) { 188#ifndef __powerpc64__ 189 register_t vpn_hi, vpn_lo; 190 register_t msr; 191 register_t scratch; 192#endif 193 194 vpn <<= ADDR_PIDX_SHFT; 195 vpn &= ~(0xffffULL << 48); 196 197 mtx_lock_spin(&tlbie_mutex); 198#ifdef __powerpc64__ 199 __asm __volatile("\ 200 ptesync; \ 201 tlbie %0; \ 202 eieio; \ 203 tlbsync; \ 204 ptesync;" 205 :: "r"(vpn) : "memory"); 206#else 207 vpn_hi = (uint32_t)(vpn >> 32); 208 vpn_lo = (uint32_t)vpn; 209 210 __asm __volatile("\ 211 mfmsr %0; \ 212 mr %1, %0; \ 213 insrdi %1,%5,1,0; \ 214 mtmsrd %1; \ 215 ptesync; \ 216 \ 217 sld %1,%2,%4; \ 218 or %1,%1,%3; \ 219 tlbie %1; \ 220 \ 221 mtmsrd %0; \ 222 eieio; \ 223 tlbsync; \ 224 ptesync;" 225 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 226 : "memory"); 227#endif 228 mtx_unlock_spin(&tlbie_mutex); 229} 230 231#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 232#define ENABLE_TRANS(msr) mtmsr(msr); isync() 233 234#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 235#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 236#define VSID_HASH_MASK 0x0000007fffffffffULL 237 238#define PVO_PTEGIDX_MASK 0x007UL /* which PTEG slot */ 239#define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */ 240#define PVO_WIRED 0x010UL /* PVO entry is wired */ 241#define PVO_MANAGED 0x020UL /* PVO entry is managed */ 242#define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during 243 bootstrap */ 244#define PVO_FAKE 0x100UL /* fictitious phys page */ 245#define PVO_LARGE 0x200UL /* large page */ 246#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 247#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 248#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 249#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 250#define PVO_PTEGIDX_CLR(pvo) \ 251 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 252#define PVO_PTEGIDX_SET(pvo, i) \ 253 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 254#define PVO_VSID(pvo) ((pvo)->pvo_vpn >> 16) 255 256#define MOEA_PVO_CHECK(pvo) 257 258#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 259#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 260#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 261 262struct ofw_map { 263 cell_t om_va; 264 cell_t om_len; 265 cell_t om_pa_hi; 266 cell_t om_pa_lo; 267 cell_t om_mode; 268}; 269 270/* 271 * Map of physical memory regions. 272 */ 273static struct mem_region *regions; 274static struct mem_region *pregions; 275static u_int phys_avail_count; 276static int regions_sz, pregions_sz; 277extern int ofw_real_mode; 278 279extern struct pmap ofw_pmap; 280 281extern void bs_remap_earlyboot(void); 282 283 284/* 285 * Lock for the pteg and pvo tables. 286 */ 287struct mtx moea64_table_mutex; 288 289/* 290 * PTEG data. 291 */ 292static struct lpteg *moea64_pteg_table; 293u_int moea64_pteg_count; 294u_int moea64_pteg_mask; 295 296/* 297 * PVO data. 
298 */ 299struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 300/* lists of unmanaged pages */ 301struct pvo_head moea64_pvo_kunmanaged = 302 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 303struct pvo_head moea64_pvo_unmanaged = 304 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 305 306uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 307uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 308 309#define BPVO_POOL_SIZE 327680 310static struct pvo_entry *moea64_bpvo_pool; 311static int moea64_bpvo_pool_index = 0; 312 313#define VSID_NBPW (sizeof(u_int32_t) * 8) 314#ifdef __powerpc64__ 315#define NVSIDS (NPMAPS * 16) 316#define VSID_HASHMASK 0xffffffffUL 317#else 318#define NVSIDS NPMAPS 319#define VSID_HASHMASK 0xfffffUL 320#endif 321static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 322 323static boolean_t moea64_initialized = FALSE; 324 325/* 326 * Statistics. 327 */ 328u_int moea64_pte_valid = 0; 329u_int moea64_pte_overflow = 0; 330u_int moea64_pvo_entries = 0; 331u_int moea64_pvo_enter_calls = 0; 332u_int moea64_pvo_remove_calls = 0; 333SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 334 &moea64_pte_valid, 0, ""); 335SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 336 &moea64_pte_overflow, 0, ""); 337SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 338 &moea64_pvo_entries, 0, ""); 339SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 340 &moea64_pvo_enter_calls, 0, ""); 341SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 342 &moea64_pvo_remove_calls, 0, ""); 343 344vm_offset_t moea64_scratchpage_va[2]; 345uint64_t moea64_scratchpage_vpn[2]; 346struct lpte *moea64_scratchpage_pte[2]; 347struct mtx moea64_scratchpage_mtx; 348 349uint64_t moea64_large_page_mask = 0; 350int moea64_large_page_size = 0; 351int moea64_large_page_shift = 0; 352 353/* 354 * Allocate physical memory for use in moea64_bootstrap. 355 */ 356static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 357 358/* 359 * PTE calls. 360 */ 361static int moea64_pte_insert(u_int, struct lpte *); 362 363/* 364 * PVO calls. 365 */ 366static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 367 vm_offset_t, vm_offset_t, uint64_t, int); 368static void moea64_pvo_remove(struct pvo_entry *); 369static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 370static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *); 371 372/* 373 * Utility routines. 
374 */ 375static void moea64_bootstrap(mmu_t mmup, 376 vm_offset_t kernelstart, vm_offset_t kernelend); 377static void moea64_cpu_bootstrap(mmu_t, int ap); 378static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 379 vm_prot_t, boolean_t); 380static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 381static u_int moea64_clear_bit(vm_page_t, u_int64_t); 382static void moea64_kremove(mmu_t, vm_offset_t); 383static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 384 vm_offset_t pa, vm_size_t sz); 385static void tlbia(void); 386#ifdef __powerpc64__ 387static void slbia(void); 388#endif 389 390/* 391 * Kernel MMU interface 392 */ 393void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 394void moea64_clear_modify(mmu_t, vm_page_t); 395void moea64_clear_reference(mmu_t, vm_page_t); 396void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 397void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 398void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 399 vm_prot_t); 400void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 401vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 402vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 403void moea64_init(mmu_t); 404boolean_t moea64_is_modified(mmu_t, vm_page_t); 405boolean_t moea64_is_referenced(mmu_t, vm_page_t); 406boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 407vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 408boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 409int moea64_page_wired_mappings(mmu_t, vm_page_t); 410void moea64_pinit(mmu_t, pmap_t); 411void moea64_pinit0(mmu_t, pmap_t); 412void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 413void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 414void moea64_qremove(mmu_t, vm_offset_t, int); 415void moea64_release(mmu_t, pmap_t); 416void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 417void moea64_remove_all(mmu_t, vm_page_t); 418void moea64_remove_write(mmu_t, vm_page_t); 419void moea64_zero_page(mmu_t, vm_page_t); 420void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 421void moea64_zero_page_idle(mmu_t, vm_page_t); 422void moea64_activate(mmu_t, struct thread *); 423void moea64_deactivate(mmu_t, struct thread *); 424void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 425void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 426vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 427void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 428boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 429static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 430 431static mmu_method_t moea64_methods[] = { 432 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 433 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 434 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 435 MMUMETHOD(mmu_copy_page, moea64_copy_page), 436 MMUMETHOD(mmu_enter, moea64_enter), 437 MMUMETHOD(mmu_enter_object, moea64_enter_object), 438 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 439 MMUMETHOD(mmu_extract, moea64_extract), 440 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 441 MMUMETHOD(mmu_init, moea64_init), 442 MMUMETHOD(mmu_is_modified, moea64_is_modified), 443 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 444 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 445 MMUMETHOD(mmu_map, moea64_map), 446 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 447 
MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 448 MMUMETHOD(mmu_pinit, moea64_pinit), 449 MMUMETHOD(mmu_pinit0, moea64_pinit0), 450 MMUMETHOD(mmu_protect, moea64_protect), 451 MMUMETHOD(mmu_qenter, moea64_qenter), 452 MMUMETHOD(mmu_qremove, moea64_qremove), 453 MMUMETHOD(mmu_release, moea64_release), 454 MMUMETHOD(mmu_remove, moea64_remove), 455 MMUMETHOD(mmu_remove_all, moea64_remove_all), 456 MMUMETHOD(mmu_remove_write, moea64_remove_write), 457 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 458 MMUMETHOD(mmu_zero_page, moea64_zero_page), 459 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 460 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 461 MMUMETHOD(mmu_activate, moea64_activate), 462 MMUMETHOD(mmu_deactivate, moea64_deactivate), 463 464 /* Internal interfaces */ 465 MMUMETHOD(mmu_bootstrap, moea64_bootstrap), 466 MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap), 467 MMUMETHOD(mmu_mapdev, moea64_mapdev), 468 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 469 MMUMETHOD(mmu_kextract, moea64_kextract), 470 MMUMETHOD(mmu_kenter, moea64_kenter), 471 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 472 473 { 0, 0 } 474}; 475 476static mmu_def_t oea64_mmu = { 477 MMU_TYPE_G5, 478 moea64_methods, 479 0 480}; 481MMU_DEF(oea64_mmu); 482 483static __inline u_int 484va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 485{ 486 uint64_t hash; 487 int shift; 488 489 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT; 490 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 491 shift); 492 return (hash & moea64_pteg_mask); 493} 494 495static __inline struct pvo_head * 496pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 497{ 498 struct vm_page *pg; 499 500 pg = PHYS_TO_VM_PAGE(pa); 501 502 if (pg_p != NULL) 503 *pg_p = pg; 504 505 if (pg == NULL) 506 return (&moea64_pvo_unmanaged); 507 508 return (&pg->md.mdpg_pvoh); 509} 510 511static __inline struct pvo_head * 512vm_page_to_pvoh(vm_page_t m) 513{ 514 515 return (&m->md.mdpg_pvoh); 516} 517 518static __inline void 519moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 520{ 521 522 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 523 m->md.mdpg_attrs &= ~ptebit; 524} 525 526static __inline u_int64_t 527moea64_attr_fetch(vm_page_t m) 528{ 529 530 return (m->md.mdpg_attrs); 531} 532 533static __inline void 534moea64_attr_save(vm_page_t m, u_int64_t ptebit) 535{ 536 537 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 538 m->md.mdpg_attrs |= ptebit; 539} 540 541static __inline void 542moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 543 uint64_t pte_lo, int flags) 544{ 545 546 ASSERT_TABLE_LOCK(); 547 548 /* 549 * Construct a PTE. Default to IMB initially. Valid bit only gets 550 * set when the real pte is set in memory. 551 * 552 * Note: Don't set the valid bit for correct operation of tlb update. 
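 *
 * LPTE_VALID is turned on later, by moea64_pte_set(), once the entry has
 * actually been copied into the hardware page table; until then the MMU
 * never sees a half-constructed PTE.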
553 */ 554 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 555 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 556 557 if (flags & PVO_LARGE) 558 pt->pte_hi |= LPTE_BIG; 559 560 pt->pte_lo = pte_lo; 561} 562 563static __inline void 564moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 565{ 566 567 ASSERT_TABLE_LOCK(); 568 569 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 570} 571 572static __inline void 573moea64_pte_clear(struct lpte *pt, uint64_t vpn, u_int64_t ptebit) 574{ 575 ASSERT_TABLE_LOCK(); 576 577 /* 578 * As shown in Section 7.6.3.2.3 579 */ 580 pt->pte_lo &= ~ptebit; 581 TLBIE(vpn); 582} 583 584static __inline void 585moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 586{ 587 588 ASSERT_TABLE_LOCK(); 589 pvo_pt->pte_hi |= LPTE_VALID; 590 591 /* 592 * Update the PTE as defined in section 7.6.3.1. 593 * Note that the REF/CHG bits are from pvo_pt and thus should have 594 * been saved so this routine can restore them (if desired). 595 */ 596 pt->pte_lo = pvo_pt->pte_lo; 597 EIEIO(); 598 pt->pte_hi = pvo_pt->pte_hi; 599 PTESYNC(); 600 moea64_pte_valid++; 601} 602 603static __inline void 604moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn) 605{ 606 ASSERT_TABLE_LOCK(); 607 pvo_pt->pte_hi &= ~LPTE_VALID; 608 609 /* 610 * Force the reg & chg bits back into the PTEs. 611 */ 612 SYNC(); 613 614 /* 615 * Invalidate the pte. 616 */ 617 pt->pte_hi &= ~LPTE_VALID; 618 TLBIE(vpn); 619 620 /* 621 * Save the reg & chg bits. 622 */ 623 moea64_pte_synch(pt, pvo_pt); 624 moea64_pte_valid--; 625} 626 627static __inline void 628moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn) 629{ 630 631 /* 632 * Invalidate the PTE 633 */ 634 moea64_pte_unset(pt, pvo_pt, vpn); 635 moea64_pte_set(pt, pvo_pt); 636} 637 638static __inline uint64_t 639moea64_calc_wimg(vm_offset_t pa) 640{ 641 uint64_t pte_lo; 642 int i; 643 644 /* 645 * Assume the page is cache inhibited and access is guarded unless 646 * it's in our available memory array. 647 */ 648 pte_lo = LPTE_I | LPTE_G; 649 for (i = 0; i < pregions_sz; i++) { 650 if ((pa >= pregions[i].mr_start) && 651 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 652 pte_lo &= ~(LPTE_I | LPTE_G); 653 pte_lo |= LPTE_M; 654 break; 655 } 656 } 657 658 return pte_lo; 659} 660 661/* 662 * Quick sort callout for comparing memory regions. 
663 */ 664static int mr_cmp(const void *a, const void *b); 665static int om_cmp(const void *a, const void *b); 666 667static int 668mr_cmp(const void *a, const void *b) 669{ 670 const struct mem_region *regiona; 671 const struct mem_region *regionb; 672 673 regiona = a; 674 regionb = b; 675 if (regiona->mr_start < regionb->mr_start) 676 return (-1); 677 else if (regiona->mr_start > regionb->mr_start) 678 return (1); 679 else 680 return (0); 681} 682 683static int 684om_cmp(const void *a, const void *b) 685{ 686 const struct ofw_map *mapa; 687 const struct ofw_map *mapb; 688 689 mapa = a; 690 mapb = b; 691 if (mapa->om_pa_hi < mapb->om_pa_hi) 692 return (-1); 693 else if (mapa->om_pa_hi > mapb->om_pa_hi) 694 return (1); 695 else if (mapa->om_pa_lo < mapb->om_pa_lo) 696 return (-1); 697 else if (mapa->om_pa_lo > mapb->om_pa_lo) 698 return (1); 699 else 700 return (0); 701} 702 703static void 704moea64_cpu_bootstrap(mmu_t mmup, int ap) 705{ 706 int i = 0; 707 #ifdef __powerpc64__ 708 struct slb *slb = PCPU_GET(slb); 709 #endif 710 711 /* 712 * Initialize segment registers and MMU 713 */ 714 715 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 716 717 /* 718 * Install kernel SLB entries 719 */ 720 721 #ifdef __powerpc64__ 722 slbia(); 723 724 for (i = 0; i < 64; i++) { 725 if (!(slb[i].slbe & SLBE_VALID)) 726 continue; 727 728 __asm __volatile ("slbmte %0, %1" :: 729 "r"(slb[i].slbv), "r"(slb[i].slbe)); 730 } 731 #else 732 for (i = 0; i < 16; i++) 733 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 734 #endif 735 736 /* 737 * Install page table 738 */ 739 740 __asm __volatile ("ptesync; mtsdr1 %0; isync" 741 :: "r"((uintptr_t)moea64_pteg_table 742 | (64 - cntlzd(moea64_pteg_mask >> 11)))); 743 tlbia(); 744} 745 746static void 747moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 748{ 749 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 750 register_t msr; 751 vm_offset_t off; 752 vm_paddr_t pa_base; 753 int i, ofw_mappings; 754 755 bzero(translations, sz); 756 if (OF_getprop(mmu, "translations", translations, sz) == -1) 757 panic("moea64_bootstrap: can't get ofw translations"); 758 759 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 760 sz /= sizeof(*translations); 761 qsort(translations, sz, sizeof (*translations), om_cmp); 762 763 for (i = 0, ofw_mappings = 0; i < sz; i++) { 764 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 765 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 766 translations[i].om_len); 767 768 if (translations[i].om_pa_lo % PAGE_SIZE) 769 panic("OFW translation not page-aligned!"); 770 771 pa_base = translations[i].om_pa_lo; 772 773 #ifdef __powerpc64__ 774 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32; 775 #else 776 if (translations[i].om_pa_hi) 777 panic("OFW translations above 32-bit boundary!"); 778 #endif 779 780 /* Now enter the pages for this mapping */ 781 782 DISABLE_TRANS(msr); 783 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 784 if (moea64_pvo_find_va(kernel_pmap, 785 translations[i].om_va + off) != NULL) 786 continue; 787 788 moea64_kenter(mmup, translations[i].om_va + off, 789 pa_base + off); 790 791 ofw_mappings++; 792 } 793 ENABLE_TRANS(msr); 794 } 795} 796 797#ifdef __powerpc64__ 798static void 799moea64_probe_large_page(void) 800{ 801 uint16_t pvr = mfpvr() >> 16; 802 803 switch (pvr) { 804 case IBM970: 805 case IBM970FX: 806 case IBM970MP: 807 powerpc_sync(); isync(); 808 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 809 powerpc_sync(); isync(); 810 811 /* FALLTHROUGH */ 
812 case IBMCELLBE: 813 moea64_large_page_size = 0x1000000; /* 16 MB */ 814 moea64_large_page_shift = 24; 815 break; 816 default: 817 moea64_large_page_size = 0; 818 } 819 820 moea64_large_page_mask = moea64_large_page_size - 1; 821} 822 823static void 824moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 825{ 826 struct slb *cache; 827 struct slb entry; 828 uint64_t esid, slbe; 829 uint64_t i; 830 831 cache = PCPU_GET(slb); 832 esid = va >> ADDR_SR_SHFT; 833 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 834 835 for (i = 0; i < 64; i++) { 836 if (cache[i].slbe == (slbe | i)) 837 return; 838 } 839 840 entry.slbe = slbe; 841 entry.slbv = KERNEL_VSID(esid, large) << SLBV_VSID_SHIFT; 842 if (large) 843 entry.slbv |= SLBV_L; 844 845 slb_insert(kernel_pmap, cache, &entry); 846} 847#endif 848 849static void 850moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, 851 vm_offset_t kernelend) 852{ 853 register_t msr; 854 vm_paddr_t pa; 855 vm_offset_t size, off; 856 uint64_t pte_lo; 857 int i; 858 859 if (moea64_large_page_size == 0) 860 hw_direct_map = 0; 861 862 DISABLE_TRANS(msr); 863 if (hw_direct_map) { 864 PMAP_LOCK(kernel_pmap); 865 for (i = 0; i < pregions_sz; i++) { 866 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + 867 pregions[i].mr_size; pa += moea64_large_page_size) { 868 pte_lo = LPTE_M; 869 870 /* 871 * Set memory access as guarded if prefetch within 872 * the page could exit the available physmem area. 873 */ 874 if (pa & moea64_large_page_mask) { 875 pa &= moea64_large_page_mask; 876 pte_lo |= LPTE_G; 877 } 878 if (pa + moea64_large_page_size > 879 pregions[i].mr_start + pregions[i].mr_size) 880 pte_lo |= LPTE_G; 881 882 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 883 &moea64_pvo_kunmanaged, pa, pa, 884 pte_lo, PVO_WIRED | PVO_LARGE | 885 VM_PROT_EXECUTE); 886 } 887 } 888 PMAP_UNLOCK(kernel_pmap); 889 } else { 890 size = moea64_pteg_count * sizeof(struct lpteg); 891 off = (vm_offset_t)(moea64_pteg_table); 892 for (pa = off; pa < off + size; pa += PAGE_SIZE) 893 moea64_kenter(mmup, pa, pa); 894 size = sizeof(struct pvo_head) * moea64_pteg_count; 895 off = (vm_offset_t)(moea64_pvo_table); 896 for (pa = off; pa < off + size; pa += PAGE_SIZE) 897 moea64_kenter(mmup, pa, pa); 898 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 899 off = (vm_offset_t)(moea64_bpvo_pool); 900 for (pa = off; pa < off + size; pa += PAGE_SIZE) 901 moea64_kenter(mmup, pa, pa); 902 903 /* 904 * Map certain important things, like ourselves. 905 * 906 * NOTE: We do not map the exception vector space. That code is 907 * used only in real mode, and leaving it unmapped allows us to 908 * catch NULL pointer deferences, instead of making NULL a valid 909 * address. 
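 *
 * The loop below therefore only enters the kernel text and data,
 * one 4 KB page at a time, via moea64_kenter().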
910 */ 911 912 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; 913 pa += PAGE_SIZE) 914 moea64_kenter(mmup, pa, pa); 915 } 916 ENABLE_TRANS(msr); 917} 918 919static void 920moea64_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 921{ 922 ihandle_t mmui; 923 phandle_t chosen; 924 phandle_t mmu; 925 size_t sz; 926 int i, j; 927 vm_size_t size, physsz, hwphyssz; 928 vm_offset_t pa, va; 929 register_t msr; 930 void *dpcpu; 931 932#ifndef __powerpc64__ 933 /* We don't have a direct map since there is no BAT */ 934 hw_direct_map = 0; 935 936 /* Make sure battable is zero, since we have no BAT */ 937 for (i = 0; i < 16; i++) { 938 battable[i].batu = 0; 939 battable[i].batl = 0; 940 } 941#else 942 moea64_probe_large_page(); 943 944 /* Use a direct map if we have large page support */ 945 if (moea64_large_page_size > 0) 946 hw_direct_map = 1; 947 else 948 hw_direct_map = 0; 949#endif 950 951 /* Get physical memory regions from firmware */ 952 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 953 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 954 955 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 956 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 957 panic("moea64_bootstrap: phys_avail too small"); 958 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 959 phys_avail_count = 0; 960 physsz = 0; 961 hwphyssz = 0; 962 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 963 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 964 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 965 regions[i].mr_start + regions[i].mr_size, 966 regions[i].mr_size); 967 if (hwphyssz != 0 && 968 (physsz + regions[i].mr_size) >= hwphyssz) { 969 if (physsz < hwphyssz) { 970 phys_avail[j] = regions[i].mr_start; 971 phys_avail[j + 1] = regions[i].mr_start + 972 hwphyssz - physsz; 973 physsz = hwphyssz; 974 phys_avail_count++; 975 } 976 break; 977 } 978 phys_avail[j] = regions[i].mr_start; 979 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 980 phys_avail_count++; 981 physsz += regions[i].mr_size; 982 } 983 984 /* Check for overlap with the kernel and exception vectors */ 985 for (j = 0; j < 2*phys_avail_count; j+=2) { 986 if (phys_avail[j] < EXC_LAST) 987 phys_avail[j] += EXC_LAST; 988 989 if (kernelstart >= phys_avail[j] && 990 kernelstart < phys_avail[j+1]) { 991 if (kernelend < phys_avail[j+1]) { 992 phys_avail[2*phys_avail_count] = 993 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 994 phys_avail[2*phys_avail_count + 1] = 995 phys_avail[j+1]; 996 phys_avail_count++; 997 } 998 999 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 1000 } 1001 1002 if (kernelend >= phys_avail[j] && 1003 kernelend < phys_avail[j+1]) { 1004 if (kernelstart > phys_avail[j]) { 1005 phys_avail[2*phys_avail_count] = phys_avail[j]; 1006 phys_avail[2*phys_avail_count + 1] = 1007 kernelstart & ~PAGE_MASK; 1008 phys_avail_count++; 1009 } 1010 1011 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 1012 } 1013 } 1014 1015 physmem = btoc(physsz); 1016 1017 /* 1018 * Allocate PTEG table. 1019 */ 1020#ifdef PTEGCOUNT 1021 moea64_pteg_count = PTEGCOUNT; 1022#else 1023 moea64_pteg_count = 0x1000; 1024 1025 while (moea64_pteg_count < physmem) 1026 moea64_pteg_count <<= 1; 1027 1028 moea64_pteg_count >>= 1; 1029#endif /* PTEGCOUNT */ 1030 1031 size = moea64_pteg_count * sizeof(struct lpteg); 1032 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 1033 moea64_pteg_count, size); 1034 1035 /* 1036 * We now need to allocate memory. 
This memory, to be allocated, 1037 * has to reside in a page table. The page table we are about to 1038 * allocate. We don't have BAT. So drop to data real mode for a minute 1039 * as a measure of last resort. We do this a couple times. 1040 */ 1041 1042 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size); 1043 DISABLE_TRANS(msr); 1044 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg)); 1045 ENABLE_TRANS(msr); 1046 1047 moea64_pteg_mask = moea64_pteg_count - 1; 1048 1049 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 1050 1051 /* 1052 * Allocate pv/overflow lists. 1053 */ 1054 size = sizeof(struct pvo_head) * moea64_pteg_count; 1055 1056 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 1057 PAGE_SIZE); 1058 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 1059 1060 DISABLE_TRANS(msr); 1061 for (i = 0; i < moea64_pteg_count; i++) 1062 LIST_INIT(&moea64_pvo_table[i]); 1063 ENABLE_TRANS(msr); 1064 1065 /* 1066 * Initialize the lock that synchronizes access to the pteg and pvo 1067 * tables. 1068 */ 1069 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 1070 MTX_RECURSE); 1071 1072 /* 1073 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU. 1074 */ 1075 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN); 1076 1077 /* 1078 * Initialise the unmanaged pvo pool. 1079 */ 1080 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 1081 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 1082 moea64_bpvo_pool_index = 0; 1083 1084 /* 1085 * Make sure kernel vsid is allocated as well as VSID 0. 1086 */ 1087 #ifndef __powerpc64__ 1088 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 1089 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 1090 moea64_vsid_bitmap[0] |= 1; 1091 #endif 1092 1093 /* 1094 * Initialize the kernel pmap (which is statically allocated). 1095 */ 1096 #ifdef __powerpc64__ 1097 for (i = 0; i < 64; i++) { 1098 pcpup->pc_slb[i].slbv = 0; 1099 pcpup->pc_slb[i].slbe = 0; 1100 } 1101 #else 1102 for (i = 0; i < 16; i++) 1103 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 1104 #endif 1105 1106 kernel_pmap->pmap_phys = kernel_pmap; 1107 kernel_pmap->pm_active = ~0; 1108 1109 PMAP_LOCK_INIT(kernel_pmap); 1110 1111 /* 1112 * Now map in all the other buffers we allocated earlier 1113 */ 1114 1115 moea64_setup_direct_map(mmup, kernelstart, kernelend); 1116 1117 /* 1118 * Set up the Open Firmware pmap and add its mappings if not in real 1119 * mode. 1120 */ 1121 1122 if (!ofw_real_mode) { 1123 #ifndef __powerpc64__ 1124 moea64_pinit(mmup, &ofw_pmap); 1125 1126 for (i = 0; i < 16; i++) 1127 ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i]; 1128 #endif 1129 1130 if ((chosen = OF_finddevice("/chosen")) == -1) 1131 panic("moea64_bootstrap: can't find /chosen"); 1132 OF_getprop(chosen, "mmu", &mmui, 4); 1133 1134 if ((mmu = OF_instance_to_package(mmui)) == -1) 1135 panic("moea64_bootstrap: can't get mmu package"); 1136 if ((sz = OF_getproplen(mmu, "translations")) == -1) 1137 panic("moea64_bootstrap: can't get ofw translation count"); 1138 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 1139 panic("moea64_bootstrap: too many ofw translations"); 1140 1141 moea64_add_ofw_mappings(mmup, mmu, sz); 1142 } 1143 1144#ifdef SMP 1145 TLBSYNC(); 1146#endif 1147 1148 /* 1149 * Calculate the last available physical address. 
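 * phys_avail[] holds (start, end) pairs and is zero-terminated, so the
 * loop below simply walks to the final pair and takes its end address.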
1150 */ 1151 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1152 ; 1153 Maxmem = powerpc_btop(phys_avail[i + 1]); 1154 1155 /* 1156 * Initialize MMU and remap early physical mappings 1157 */ 1158 moea64_cpu_bootstrap(mmup,0); 1159 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 1160 pmap_bootstrapped++; 1161 bs_remap_earlyboot(); 1162 1163 /* 1164 * Set the start and end of kva. 1165 */ 1166 virtual_avail = VM_MIN_KERNEL_ADDRESS; 1167 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 1168 1169 /* 1170 * Map the entire KVA range into the SLB. We must not fault there. 1171 */ 1172 #ifdef __powerpc64__ 1173 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 1174 moea64_bootstrap_slb_prefault(va, 0); 1175 #endif 1176 1177 /* 1178 * Figure out how far we can extend virtual_end into segment 16 1179 * without running into existing mappings. Segment 16 is guaranteed 1180 * to contain neither RAM nor devices (at least on Apple hardware), 1181 * but will generally contain some OFW mappings we should not 1182 * step on. 1183 */ 1184 1185 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 1186 PMAP_LOCK(kernel_pmap); 1187 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 1188 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 1189 virtual_end += PAGE_SIZE; 1190 PMAP_UNLOCK(kernel_pmap); 1191 #endif 1192 1193 /* 1194 * Allocate some things for page zeroing. We put this directly 1195 * in the page table, marked with LPTE_LOCKED, to avoid any 1196 * of the PVO book-keeping or other parts of the VM system 1197 * from even knowing that this hack exists. 1198 */ 1199 1200 if (!hw_direct_map) { 1201 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 1202 MTX_DEF); 1203 for (i = 0; i < 2; i++) { 1204 struct lpte pt; 1205 uint64_t vsid; 1206 int pteidx, ptegidx; 1207 1208 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 1209 virtual_end -= PAGE_SIZE; 1210 1211 LOCK_TABLE(); 1212 1213 vsid = va_to_vsid(kernel_pmap, 1214 moea64_scratchpage_va[i]); 1215 moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i], 1216 LPTE_NOEXEC, 0); 1217 pt.pte_hi |= LPTE_LOCKED; 1218 1219 moea64_scratchpage_vpn[i] = (vsid << 16) | 1220 ((moea64_scratchpage_va[i] & ADDR_PIDX) >> 1221 ADDR_PIDX_SHFT); 1222 ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i], 0); 1223 pteidx = moea64_pte_insert(ptegidx, &pt); 1224 if (pt.pte_hi & LPTE_HID) 1225 ptegidx ^= moea64_pteg_mask; 1226 1227 moea64_scratchpage_pte[i] = 1228 &moea64_pteg_table[ptegidx].pt[pteidx]; 1229 1230 UNLOCK_TABLE(); 1231 } 1232 } 1233 1234 /* 1235 * Allocate a kernel stack with a guard page for thread0 and map it 1236 * into the kernel page map. 1237 */ 1238 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 1239 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1240 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 1241 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 1242 thread0.td_kstack = va; 1243 thread0.td_kstack_pages = KSTACK_PAGES; 1244 for (i = 0; i < KSTACK_PAGES; i++) { 1245 moea64_kenter(mmup, va, pa); 1246 pa += PAGE_SIZE; 1247 va += PAGE_SIZE; 1248 } 1249 1250 /* 1251 * Allocate virtual address space for the message buffer. 
1252 */ 1253 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1254 msgbufp = (struct msgbuf *)virtual_avail; 1255 va = virtual_avail; 1256 virtual_avail += round_page(MSGBUF_SIZE); 1257 while (va < virtual_avail) { 1258 moea64_kenter(mmup, va, pa); 1259 pa += PAGE_SIZE; 1260 va += PAGE_SIZE; 1261 } 1262 1263 /* 1264 * Allocate virtual address space for the dynamic percpu area. 1265 */ 1266 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1267 dpcpu = (void *)virtual_avail; 1268 va = virtual_avail; 1269 virtual_avail += DPCPU_SIZE; 1270 while (va < virtual_avail) { 1271 moea64_kenter(mmup, va, pa); 1272 pa += PAGE_SIZE; 1273 va += PAGE_SIZE; 1274 } 1275 dpcpu_init(dpcpu, 0); 1276} 1277 1278/* 1279 * Activate a user pmap. The pmap must be activated before its address 1280 * space can be accessed in any way. 1281 */ 1282void 1283moea64_activate(mmu_t mmu, struct thread *td) 1284{ 1285 pmap_t pm; 1286 1287 pm = &td->td_proc->p_vmspace->vm_pmap; 1288 pm->pm_active |= PCPU_GET(cpumask); 1289 1290 #ifdef __powerpc64__ 1291 PCPU_SET(userslb, pm->pm_slb); 1292 #else 1293 PCPU_SET(curpmap, pm->pmap_phys); 1294 #endif 1295} 1296 1297void 1298moea64_deactivate(mmu_t mmu, struct thread *td) 1299{ 1300 pmap_t pm; 1301 1302 pm = &td->td_proc->p_vmspace->vm_pmap; 1303 pm->pm_active &= ~(PCPU_GET(cpumask)); 1304 #ifdef __powerpc64__ 1305 PCPU_SET(userslb, NULL); 1306 #else 1307 PCPU_SET(curpmap, NULL); 1308 #endif 1309} 1310 1311void 1312moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1313{ 1314 struct pvo_entry *pvo; 1315 struct lpte *pt; 1316 uint64_t vsid; 1317 int i, ptegidx; 1318 1319 PMAP_LOCK(pm); 1320 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1321 1322 if (pvo != NULL) { 1323 LOCK_TABLE(); 1324 pt = moea64_pvo_to_pte(pvo); 1325 1326 if (wired) { 1327 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1328 pm->pm_stats.wired_count++; 1329 pvo->pvo_vaddr |= PVO_WIRED; 1330 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1331 } else { 1332 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1333 pm->pm_stats.wired_count--; 1334 pvo->pvo_vaddr &= ~PVO_WIRED; 1335 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1336 } 1337 1338 if (pt != NULL) { 1339 /* Update wiring flag in page table. */ 1340 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1341 pvo->pvo_vpn); 1342 } else if (wired) { 1343 /* 1344 * If we are wiring the page, and it wasn't in the 1345 * page table before, add it. 1346 */ 1347 vsid = PVO_VSID(pvo); 1348 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1349 pvo->pvo_vaddr & PVO_LARGE); 1350 1351 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 1352 if (i >= 0) { 1353 PVO_PTEGIDX_CLR(pvo); 1354 PVO_PTEGIDX_SET(pvo, i); 1355 } 1356 } 1357 1358 UNLOCK_TABLE(); 1359 } 1360 PMAP_UNLOCK(pm); 1361} 1362 1363/* 1364 * This goes through and sets the physical address of our 1365 * special scratch PTE to the PA we want to zero or copy. 
Because 1366 * of locking issues (this can get called in pvo_enter() by 1367 * the UMA allocator), we can't use most other utility functions here 1368 */ 1369 1370static __inline 1371void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1372 1373 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1374 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1375 1376 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1377 TLBIE(moea64_scratchpage_vpn[which]); 1378 1379 moea64_scratchpage_pte[which]->pte_lo &= 1380 ~(LPTE_WIMG | LPTE_RPGN); 1381 moea64_scratchpage_pte[which]->pte_lo |= 1382 moea64_calc_wimg(pa) | (uint64_t)pa; 1383 EIEIO(); 1384 1385 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1386 PTESYNC(); isync(); 1387} 1388 1389void 1390moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1391{ 1392 vm_offset_t dst; 1393 vm_offset_t src; 1394 1395 dst = VM_PAGE_TO_PHYS(mdst); 1396 src = VM_PAGE_TO_PHYS(msrc); 1397 1398 if (hw_direct_map) { 1399 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1400 } else { 1401 mtx_lock(&moea64_scratchpage_mtx); 1402 1403 moea64_set_scratchpage_pa(0,src); 1404 moea64_set_scratchpage_pa(1,dst); 1405 1406 kcopy((void *)moea64_scratchpage_va[0], 1407 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1408 1409 mtx_unlock(&moea64_scratchpage_mtx); 1410 } 1411} 1412 1413void 1414moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1415{ 1416 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1417 1418 if (!moea64_initialized) 1419 panic("moea64_zero_page: can't zero pa %#" PRIxPTR, pa); 1420 if (size + off > PAGE_SIZE) 1421 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1422 1423 if (hw_direct_map) { 1424 bzero((caddr_t)pa + off, size); 1425 } else { 1426 mtx_lock(&moea64_scratchpage_mtx); 1427 moea64_set_scratchpage_pa(0,pa); 1428 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1429 mtx_unlock(&moea64_scratchpage_mtx); 1430 } 1431} 1432 1433/* 1434 * Zero a page of physical memory by temporarily mapping it 1435 */ 1436void 1437moea64_zero_page(mmu_t mmu, vm_page_t m) 1438{ 1439 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1440 vm_offset_t va, off; 1441 1442 if (!moea64_initialized) 1443 panic("moea64_zero_page: can't zero pa %#zx", pa); 1444 1445 if (!hw_direct_map) { 1446 mtx_lock(&moea64_scratchpage_mtx); 1447 1448 moea64_set_scratchpage_pa(0,pa); 1449 va = moea64_scratchpage_va[0]; 1450 } else { 1451 va = pa; 1452 } 1453 1454 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1455 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1456 1457 if (!hw_direct_map) 1458 mtx_unlock(&moea64_scratchpage_mtx); 1459} 1460 1461void 1462moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1463{ 1464 1465 moea64_zero_page(mmu, m); 1466} 1467 1468/* 1469 * Map the given physical page at the specified virtual address in the 1470 * target pmap with the protection requested. If specified the page 1471 * will be wired down. 1472 */ 1473void 1474moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1475 vm_prot_t prot, boolean_t wired) 1476{ 1477 1478 vm_page_lock_queues(); 1479 PMAP_LOCK(pmap); 1480 moea64_enter_locked(pmap, va, m, prot, wired); 1481 vm_page_unlock_queues(); 1482 PMAP_UNLOCK(pmap); 1483} 1484 1485/* 1486 * Map the given physical page at the specified virtual address in the 1487 * target pmap with the protection requested. If specified the page 1488 * will be wired down. 1489 * 1490 * The page queues and pmap must be locked. 
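 * Callers (moea64_enter(), moea64_enter_object(), moea64_enter_quick())
 * take both locks before calling in here.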
1491 */ 1492 1493static void 1494moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1495 boolean_t wired) 1496{ 1497 struct pvo_head *pvo_head; 1498 uma_zone_t zone; 1499 vm_page_t pg; 1500 uint64_t pte_lo; 1501 u_int pvo_flags; 1502 int error; 1503 1504 if (!moea64_initialized) { 1505 pvo_head = &moea64_pvo_kunmanaged; 1506 pg = NULL; 1507 zone = moea64_upvo_zone; 1508 pvo_flags = 0; 1509 } else { 1510 pvo_head = vm_page_to_pvoh(m); 1511 pg = m; 1512 zone = moea64_mpvo_zone; 1513 pvo_flags = PVO_MANAGED; 1514 } 1515 1516 if (pmap_bootstrapped) 1517 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1518 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1519 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1520 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1521 ("moea64_enter_locked: page %p is not busy", m)); 1522 1523 /* XXX change the pvo head for fake pages */ 1524 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1525 pvo_flags &= ~PVO_MANAGED; 1526 pvo_head = &moea64_pvo_kunmanaged; 1527 zone = moea64_upvo_zone; 1528 } 1529 1530 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1531 1532 if (prot & VM_PROT_WRITE) { 1533 pte_lo |= LPTE_BW; 1534 if (pmap_bootstrapped && 1535 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1536 vm_page_flag_set(m, PG_WRITEABLE); 1537 } else 1538 pte_lo |= LPTE_BR; 1539 1540 if (prot & VM_PROT_EXECUTE) 1541 pvo_flags |= VM_PROT_EXECUTE; 1542 1543 if (wired) 1544 pvo_flags |= PVO_WIRED; 1545 1546 if ((m->flags & PG_FICTITIOUS) != 0) 1547 pvo_flags |= PVO_FAKE; 1548 1549 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1550 pte_lo, pvo_flags); 1551 1552 /* 1553 * Flush the page from the instruction cache if this page is 1554 * mapped executable and cacheable. 1555 */ 1556 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1557 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1558 } 1559} 1560 1561static void 1562moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1563{ 1564 1565 /* 1566 * This is much trickier than on older systems because 1567 * we can't sync the icache on physical addresses directly 1568 * without a direct map. Instead we check a couple of cases 1569 * where the memory is already mapped in and, failing that, 1570 * use the same trick we use for page zeroing to create 1571 * a temporary mapping for this physical address. 1572 */ 1573 1574 if (!pmap_bootstrapped) { 1575 /* 1576 * If PMAP is not bootstrapped, we are likely to be 1577 * in real mode. 1578 */ 1579 __syncicache((void *)pa, sz); 1580 } else if (pmap == kernel_pmap) { 1581 __syncicache((void *)va, sz); 1582 } else if (hw_direct_map) { 1583 __syncicache((void *)pa, sz); 1584 } else { 1585 /* Use the scratch page to set up a temp mapping */ 1586 1587 mtx_lock(&moea64_scratchpage_mtx); 1588 1589 moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1590 __syncicache((void *)(moea64_scratchpage_va[1] + 1591 (va & ADDR_POFF)), sz); 1592 1593 mtx_unlock(&moea64_scratchpage_mtx); 1594 } 1595} 1596 1597/* 1598 * Maps a sequence of resident pages belonging to the same object. 1599 * The sequence begins with the given page m_start. This page is 1600 * mapped at the given virtual address start. Each subsequent page is 1601 * mapped at a virtual address that is offset from start by the same 1602 * amount as the page is offset from m_start within the object. 
The 1603 * last page in the sequence is the page with the largest offset from 1604 * m_start that can be mapped at a virtual address less than the given 1605 * virtual address end. Not every virtual page between start and end 1606 * is mapped; only those for which a resident page exists with the 1607 * corresponding offset from m_start are mapped. 1608 */ 1609void 1610moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1611 vm_page_t m_start, vm_prot_t prot) 1612{ 1613 vm_page_t m; 1614 vm_pindex_t diff, psize; 1615 1616 psize = atop(end - start); 1617 m = m_start; 1618 vm_page_lock_queues(); 1619 PMAP_LOCK(pm); 1620 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1621 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1622 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1623 m = TAILQ_NEXT(m, listq); 1624 } 1625 vm_page_unlock_queues(); 1626 PMAP_UNLOCK(pm); 1627} 1628 1629void 1630moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1631 vm_prot_t prot) 1632{ 1633 1634 vm_page_lock_queues(); 1635 PMAP_LOCK(pm); 1636 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1637 FALSE); 1638 vm_page_unlock_queues(); 1639 PMAP_UNLOCK(pm); 1640} 1641 1642vm_paddr_t 1643moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1644{ 1645 struct pvo_entry *pvo; 1646 vm_paddr_t pa; 1647 1648 PMAP_LOCK(pm); 1649 pvo = moea64_pvo_find_va(pm, va); 1650 if (pvo == NULL) 1651 pa = 0; 1652 else 1653 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1654 (va - PVO_VADDR(pvo)); 1655 PMAP_UNLOCK(pm); 1656 return (pa); 1657} 1658 1659/* 1660 * Atomically extract and hold the physical page with the given 1661 * pmap and virtual address pair if that mapping permits the given 1662 * protection. 1663 */ 1664vm_page_t 1665moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1666{ 1667 struct pvo_entry *pvo; 1668 vm_page_t m; 1669 vm_paddr_t pa; 1670 1671 m = NULL; 1672 pa = 0; 1673 PMAP_LOCK(pmap); 1674retry: 1675 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1676 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1677 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1678 (prot & VM_PROT_WRITE) == 0)) { 1679 if (vm_page_pa_tryrelock(pmap, 1680 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1681 goto retry; 1682 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1683 vm_page_hold(m); 1684 } 1685 PA_UNLOCK_COND(pa); 1686 PMAP_UNLOCK(pmap); 1687 return (m); 1688} 1689 1690static void * 1691moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1692{ 1693 /* 1694 * This entire routine is a horrible hack to avoid bothering kmem 1695 * for new KVA addresses. Because this can get called from inside 1696 * kmem allocation routines, calling kmem for a new address here 1697 * can lead to multiply locking non-recursive mutexes. 
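 *
 * Instead, the page's physical address doubles as its kernel virtual
 * address: the page is entered 1:1 (va == pa) into kernel_pmap below,
 * so no new KVA ever has to be reserved.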
1698 */ 1699 static vm_pindex_t color; 1700 vm_offset_t va; 1701 1702 vm_page_t m; 1703 int pflags, needed_lock; 1704 1705 *flags = UMA_SLAB_PRIV; 1706 needed_lock = !PMAP_LOCKED(kernel_pmap); 1707 1708 if (needed_lock) 1709 PMAP_LOCK(kernel_pmap); 1710 1711 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1712 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1713 else 1714 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1715 if (wait & M_ZERO) 1716 pflags |= VM_ALLOC_ZERO; 1717 1718 for (;;) { 1719 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1720 if (m == NULL) { 1721 if (wait & M_NOWAIT) 1722 return (NULL); 1723 VM_WAIT; 1724 } else 1725 break; 1726 } 1727 1728 va = VM_PAGE_TO_PHYS(m); 1729 1730 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1731 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1732 PVO_WIRED | PVO_BOOTSTRAP); 1733 1734 if (needed_lock) 1735 PMAP_UNLOCK(kernel_pmap); 1736 1737 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1738 bzero((void *)va, PAGE_SIZE); 1739 1740 return (void *)va; 1741} 1742 1743void 1744moea64_init(mmu_t mmu) 1745{ 1746 1747 CTR0(KTR_PMAP, "moea64_init"); 1748 1749 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1750 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1751 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1752 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1753 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1754 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1755 1756 if (!hw_direct_map) { 1757 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1758 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1759 } 1760 1761 moea64_initialized = TRUE; 1762} 1763 1764boolean_t 1765moea64_is_referenced(mmu_t mmu, vm_page_t m) 1766{ 1767 1768 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1769 ("moea64_is_referenced: page %p is not managed", m)); 1770 return (moea64_query_bit(m, PTE_REF)); 1771} 1772 1773boolean_t 1774moea64_is_modified(mmu_t mmu, vm_page_t m) 1775{ 1776 1777 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1778 ("moea64_is_modified: page %p is not managed", m)); 1779 1780 /* 1781 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1782 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1783 * is clear, no PTEs can have LPTE_CHG set. 1784 */ 1785 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1786 if ((m->oflags & VPO_BUSY) == 0 && 1787 (m->flags & PG_WRITEABLE) == 0) 1788 return (FALSE); 1789 return (moea64_query_bit(m, LPTE_CHG)); 1790} 1791 1792void 1793moea64_clear_reference(mmu_t mmu, vm_page_t m) 1794{ 1795 1796 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1797 ("moea64_clear_reference: page %p is not managed", m)); 1798 moea64_clear_bit(m, LPTE_REF); 1799} 1800 1801void 1802moea64_clear_modify(mmu_t mmu, vm_page_t m) 1803{ 1804 1805 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1806 ("moea64_clear_modify: page %p is not managed", m)); 1807 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1808 KASSERT((m->oflags & VPO_BUSY) == 0, 1809 ("moea64_clear_modify: page %p is busy", m)); 1810 1811 /* 1812 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG 1813 * set. If the object containing the page is locked and the page is 1814 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1815 */ 1816 if ((m->flags & PG_WRITEABLE) == 0) 1817 return; 1818 moea64_clear_bit(m, LPTE_CHG); 1819} 1820 1821/* 1822 * Clear the write and modified bits in each of the given page's mappings. 
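 * Each mapping is downgraded to read-only (LPTE_BR) and any accumulated
 * LPTE_CHG state is folded into the page so it can be marked dirty before
 * PG_WRITEABLE is cleared.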
1823 */ 1824void 1825moea64_remove_write(mmu_t mmu, vm_page_t m) 1826{ 1827 struct pvo_entry *pvo; 1828 struct lpte *pt; 1829 pmap_t pmap; 1830 uint64_t lo; 1831 1832 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1833 ("moea64_remove_write: page %p is not managed", m)); 1834 1835 /* 1836 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1837 * another thread while the object is locked. Thus, if PG_WRITEABLE 1838 * is clear, no page table entries need updating. 1839 */ 1840 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1841 if ((m->oflags & VPO_BUSY) == 0 && 1842 (m->flags & PG_WRITEABLE) == 0) 1843 return; 1844 vm_page_lock_queues(); 1845 lo = moea64_attr_fetch(m); 1846 SYNC(); 1847 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1848 pmap = pvo->pvo_pmap; 1849 PMAP_LOCK(pmap); 1850 LOCK_TABLE(); 1851 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1852 pt = moea64_pvo_to_pte(pvo); 1853 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1854 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1855 if (pt != NULL) { 1856 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1857 lo |= pvo->pvo_pte.lpte.pte_lo; 1858 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1859 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1860 pvo->pvo_vpn); 1861 if (pvo->pvo_pmap == kernel_pmap) 1862 isync(); 1863 } 1864 } 1865 UNLOCK_TABLE(); 1866 PMAP_UNLOCK(pmap); 1867 } 1868 if ((lo & LPTE_CHG) != 0) { 1869 moea64_attr_clear(m, LPTE_CHG); 1870 vm_page_dirty(m); 1871 } 1872 vm_page_flag_clear(m, PG_WRITEABLE); 1873 vm_page_unlock_queues(); 1874} 1875 1876/* 1877 * moea64_ts_referenced: 1878 * 1879 * Return a count of reference bits for a page, clearing those bits. 1880 * It is not necessary for every reference bit to be cleared, but it 1881 * is necessary that 0 only be returned when there are truly no 1882 * reference bits set. 1883 * 1884 * XXX: The exact number of bits to check and clear is a matter that 1885 * should be tested and standardized at some point in the future for 1886 * optimal aging of shared pages. 1887 */ 1888boolean_t 1889moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1890{ 1891 1892 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1893 ("moea64_ts_referenced: page %p is not managed", m)); 1894 return (moea64_clear_bit(m, LPTE_REF)); 1895} 1896 1897/* 1898 * Map a wired page into kernel virtual address space. 1899 */ 1900void 1901moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1902{ 1903 uint64_t pte_lo; 1904 int error; 1905 1906#if 0 1907 if (!pmap_bootstrapped) { 1908 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1909 panic("Trying to enter an address in KVA -- %#" 1910 PRIxPTR "!\n",pa); 1911 } 1912#endif 1913 1914 pte_lo = moea64_calc_wimg(pa); 1915 1916 PMAP_LOCK(kernel_pmap); 1917 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1918 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1919 PVO_WIRED | VM_PROT_EXECUTE); 1920 1921 if (error != 0 && error != ENOENT) 1922 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1923 pa, error); 1924 1925 /* 1926 * Flush the memory from the instruction cache. 1927 */ 1928 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1929 __syncicache((void *)va, PAGE_SIZE); 1930 } 1931 PMAP_UNLOCK(kernel_pmap); 1932} 1933 1934/* 1935 * Extract the physical page address associated with the given kernel virtual 1936 * address. 1937 */ 1938vm_offset_t 1939moea64_kextract(mmu_t mmu, vm_offset_t va) 1940{ 1941 struct pvo_entry *pvo; 1942 vm_paddr_t pa; 1943 1944 /* 1945 * Shortcut the direct-mapped case when applicable. 
We never put 1946 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1947 */ 1948 if (va < VM_MIN_KERNEL_ADDRESS) 1949 return (va); 1950 1951 PMAP_LOCK(kernel_pmap); 1952 pvo = moea64_pvo_find_va(kernel_pmap, va); 1953 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1954 va)); 1955 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo)); 1956 PMAP_UNLOCK(kernel_pmap); 1957 return (pa); 1958} 1959 1960/* 1961 * Remove a wired page from kernel virtual address space. 1962 */ 1963void 1964moea64_kremove(mmu_t mmu, vm_offset_t va) 1965{ 1966 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1967} 1968 1969/* 1970 * Map a range of physical addresses into kernel virtual address space. 1971 * 1972 * The value passed in *virt is a suggested virtual address for the mapping. 1973 * Architectures which can support a direct-mapped physical to virtual region 1974 * can return the appropriate address within that region, leaving '*virt' 1975 * unchanged. We cannot and therefore do not; *virt is updated with the 1976 * first usable address after the mapped region. 1977 */ 1978vm_offset_t 1979moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1980 vm_offset_t pa_end, int prot) 1981{ 1982 vm_offset_t sva, va; 1983 1984 sva = *virt; 1985 va = sva; 1986 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1987 moea64_kenter(mmu, va, pa_start); 1988 *virt = va; 1989 1990 return (sva); 1991} 1992 1993/* 1994 * Returns true if the pmap's pv is one of the first 1995 * 16 pvs linked to from this page. This count may 1996 * be changed upwards or downwards in the future; it 1997 * is only necessary that true be returned for a small 1998 * subset of pmaps for proper page aging. 1999 */ 2000boolean_t 2001moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2002{ 2003 int loops; 2004 struct pvo_entry *pvo; 2005 boolean_t rv; 2006 2007 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2008 ("moea64_page_exists_quick: page %p is not managed", m)); 2009 loops = 0; 2010 rv = FALSE; 2011 vm_page_lock_queues(); 2012 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2013 if (pvo->pvo_pmap == pmap) { 2014 rv = TRUE; 2015 break; 2016 } 2017 if (++loops >= 16) 2018 break; 2019 } 2020 vm_page_unlock_queues(); 2021 return (rv); 2022} 2023 2024/* 2025 * Return the number of managed mappings to the given physical page 2026 * that are wired. 2027 */ 2028int 2029moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 2030{ 2031 struct pvo_entry *pvo; 2032 int count; 2033 2034 count = 0; 2035 if ((m->flags & PG_FICTITIOUS) != 0) 2036 return (count); 2037 vm_page_lock_queues(); 2038 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 2039 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 2040 count++; 2041 vm_page_unlock_queues(); 2042 return (count); 2043} 2044 2045static uintptr_t moea64_vsidcontext; 2046 2047uintptr_t 2048moea64_get_unique_vsid(void) { 2049 u_int entropy; 2050 register_t hash; 2051 uint32_t mask; 2052 int i; 2053 2054 entropy = 0; 2055 __asm __volatile("mftb %0" : "=r"(entropy)); 2056 2057 for (i = 0; i < NVSIDS; i += VSID_NBPW) { 2058 u_int n; 2059 2060 /* 2061 * Create a new value by mutiplying by a prime and adding in 2062 * entropy from the timebase register. This is to make the 2063 * VSID more random so that the PT hash function collides 2064 * less often. (Note that the prime casues gcc to do shifts 2065 * instead of a multiply.) 
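 *
 * The candidate VSID is then checked against a bitmap of VSIDs already
 * in use: hash >> 5 picks the 32-bit bitmap word and
 * hash & (VSID_NBPW - 1) the bit within it.  For example, a hash of
 * 0x123 would land in moea64_vsid_bitmap[9] (0x123 >> 5) at bit 3
 * (0x123 & 31), i.e. a mask of 1 << 3.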
2066 */ 2067 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 2068 hash = moea64_vsidcontext & (NVSIDS - 1); 2069 if (hash == 0) /* 0 is special, avoid it */ 2070 continue; 2071 n = hash >> 5; 2072 mask = 1 << (hash & (VSID_NBPW - 1)); 2073 hash = (moea64_vsidcontext & VSID_HASHMASK); 2074 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 2075 /* anything free in this bucket? */ 2076 if (moea64_vsid_bitmap[n] == 0xffffffff) { 2077 entropy = (moea64_vsidcontext >> 20); 2078 continue; 2079 } 2080 i = ffs(~moea64_vsid_bitmap[i]) - 1; 2081 mask = 1 << i; 2082 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 2083 hash |= i; 2084 } 2085 moea64_vsid_bitmap[n] |= mask; 2086 return (hash); 2087 } 2088 2089 panic("%s: out of segments",__func__); 2090} 2091 2092#ifdef __powerpc64__ 2093void 2094moea64_pinit(mmu_t mmu, pmap_t pmap) 2095{ 2096 PMAP_LOCK_INIT(pmap); 2097 2098 SPLAY_INIT(&pmap->pm_slbtree); 2099 pmap->pm_slb = slb_alloc_user_cache(); 2100} 2101#else 2102void 2103moea64_pinit(mmu_t mmu, pmap_t pmap) 2104{ 2105 int i; 2106 register_t hash; 2107 2108 PMAP_LOCK_INIT(pmap); 2109 2110 if (pmap_bootstrapped) 2111 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 2112 (vm_offset_t)pmap); 2113 else 2114 pmap->pmap_phys = pmap; 2115 2116 /* 2117 * Allocate some segment registers for this pmap. 2118 */ 2119 hash = moea64_get_unique_vsid(); 2120 2121 for (i = 0; i < 16; i++) 2122 pmap->pm_sr[i] = VSID_MAKE(i, hash); 2123} 2124#endif 2125 2126/* 2127 * Initialize the pmap associated with process 0. 2128 */ 2129void 2130moea64_pinit0(mmu_t mmu, pmap_t pm) 2131{ 2132 moea64_pinit(mmu, pm); 2133 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 2134} 2135 2136/* 2137 * Set the physical protection on the specified range of this map as requested. 2138 */ 2139void 2140moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 2141 vm_prot_t prot) 2142{ 2143 struct pvo_entry *pvo; 2144 struct lpte *pt; 2145 2146 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 2147 eva, prot); 2148 2149 2150 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 2151 ("moea64_protect: non current pmap")); 2152 2153 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2154 moea64_remove(mmu, pm, sva, eva); 2155 return; 2156 } 2157 2158 vm_page_lock_queues(); 2159 PMAP_LOCK(pm); 2160 for (; sva < eva; sva += PAGE_SIZE) { 2161 pvo = moea64_pvo_find_va(pm, sva); 2162 if (pvo == NULL) 2163 continue; 2164 2165 /* 2166 * Grab the PTE pointer before we diddle with the cached PTE 2167 * copy. 2168 */ 2169 LOCK_TABLE(); 2170 pt = moea64_pvo_to_pte(pvo); 2171 2172 /* 2173 * Change the protection of the page. 2174 */ 2175 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 2176 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 2177 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 2178 if ((prot & VM_PROT_EXECUTE) == 0) 2179 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 2180 2181 /* 2182 * If the PVO is in the page table, update that pte as well. 2183 */ 2184 if (pt != NULL) { 2185 moea64_pte_change(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2186 if ((pvo->pvo_pte.lpte.pte_lo & 2187 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 2188 moea64_syncicache(pm, sva, 2189 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 2190 PAGE_SIZE); 2191 } 2192 } 2193 UNLOCK_TABLE(); 2194 } 2195 vm_page_unlock_queues(); 2196 PMAP_UNLOCK(pm); 2197} 2198 2199/* 2200 * Map a list of wired pages into kernel virtual address space. This is 2201 * intended for temporary mappings which do not need page modification or 2202 * references recorded. 
Existing mappings in the region are overwritten. 2203 */ 2204void 2205moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2206{ 2207 while (count-- > 0) { 2208 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2209 va += PAGE_SIZE; 2210 m++; 2211 } 2212} 2213 2214/* 2215 * Remove page mappings from kernel virtual address space. Intended for 2216 * temporary mappings entered by moea64_qenter. 2217 */ 2218void 2219moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2220{ 2221 while (count-- > 0) { 2222 moea64_kremove(mmu, va); 2223 va += PAGE_SIZE; 2224 } 2225} 2226 2227void 2228moea64_release_vsid(uint64_t vsid) 2229{ 2230 int idx, mask; 2231 2232 idx = vsid & (NVSIDS-1); 2233 mask = 1 << (idx % VSID_NBPW); 2234 idx /= VSID_NBPW; 2235 moea64_vsid_bitmap[idx] &= ~mask; 2236} 2237 2238 2239void 2240moea64_release(mmu_t mmu, pmap_t pmap) 2241{ 2242 2243 /* 2244 * Free segment registers' VSIDs 2245 */ 2246 #ifdef __powerpc64__ 2247 free_vsids(pmap); 2248 slb_free_user_cache(pmap->pm_slb); 2249 #else 2250 if (pmap->pm_sr[0] == 0) 2251 panic("moea64_release"); 2252 2253 moea64_release_vsid(pmap->pm_sr[0]); 2254 #endif 2255 2256 PMAP_LOCK_DESTROY(pmap); 2257} 2258 2259/* 2260 * Remove the given range of addresses from the specified map. 2261 */ 2262void 2263moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2264{ 2265 struct pvo_entry *pvo; 2266 2267 vm_page_lock_queues(); 2268 PMAP_LOCK(pm); 2269 for (; sva < eva; sva += PAGE_SIZE) { 2270 pvo = moea64_pvo_find_va(pm, sva); 2271 if (pvo != NULL) 2272 moea64_pvo_remove(pvo); 2273 } 2274 vm_page_unlock_queues(); 2275 PMAP_UNLOCK(pm); 2276} 2277 2278/* 2279 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2280 * will reflect changes in pte's back to the vm_page. 2281 */ 2282void 2283moea64_remove_all(mmu_t mmu, vm_page_t m) 2284{ 2285 struct pvo_head *pvo_head; 2286 struct pvo_entry *pvo, *next_pvo; 2287 pmap_t pmap; 2288 2289 vm_page_lock_queues(); 2290 pvo_head = vm_page_to_pvoh(m); 2291 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2292 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2293 2294 MOEA_PVO_CHECK(pvo); /* sanity check */ 2295 pmap = pvo->pvo_pmap; 2296 PMAP_LOCK(pmap); 2297 moea64_pvo_remove(pvo); 2298 PMAP_UNLOCK(pmap); 2299 } 2300 if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 2301 moea64_attr_clear(m, LPTE_CHG); 2302 vm_page_dirty(m); 2303 } 2304 vm_page_flag_clear(m, PG_WRITEABLE); 2305 vm_page_unlock_queues(); 2306} 2307 2308/* 2309 * Allocate a physical page of memory directly from the phys_avail map. 2310 * Can only be called from moea64_bootstrap before avail start and end are 2311 * calculated. 
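 *
 * The allocation is carved directly out of a phys_avail[] range.  If the
 * aligned region touches the start or end of the range, that end is
 * simply trimmed; otherwise the range is split in two.  As a purely
 * illustrative example, allocating 0x2000 bytes aligned to 0x4000 from
 * the range [0x3000, 0x9000) returns [0x4000, 0x6000) and leaves
 * [0x3000, 0x4000) and [0x6000, 0x9000) in phys_avail[].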
2312 */ 2313static vm_offset_t 2314moea64_bootstrap_alloc(vm_size_t size, u_int align) 2315{ 2316 vm_offset_t s, e; 2317 int i, j; 2318 2319 size = round_page(size); 2320 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2321 if (align != 0) 2322 s = (phys_avail[i] + align - 1) & ~(align - 1); 2323 else 2324 s = phys_avail[i]; 2325 e = s + size; 2326 2327 if (s < phys_avail[i] || e > phys_avail[i + 1]) 2328 continue; 2329 2330 if (s == phys_avail[i]) { 2331 phys_avail[i] += size; 2332 } else if (e == phys_avail[i + 1]) { 2333 phys_avail[i + 1] -= size; 2334 } else { 2335 for (j = phys_avail_count * 2; j > i; j -= 2) { 2336 phys_avail[j] = phys_avail[j - 2]; 2337 phys_avail[j + 1] = phys_avail[j - 1]; 2338 } 2339 2340 phys_avail[i + 3] = phys_avail[i + 1]; 2341 phys_avail[i + 1] = s; 2342 phys_avail[i + 2] = e; 2343 phys_avail_count++; 2344 } 2345 2346 return (s); 2347 } 2348 panic("moea64_bootstrap_alloc: could not allocate memory"); 2349} 2350 2351static void 2352tlbia(void) 2353{ 2354 vm_offset_t i; 2355 #ifndef __powerpc64__ 2356 register_t msr, scratch; 2357 #endif 2358 2359 TLBSYNC(); 2360 2361 for (i = 0; i < 0xFF000; i += 0x00001000) { 2362 #ifdef __powerpc64__ 2363 __asm __volatile("tlbiel %0" :: "r"(i)); 2364 #else 2365 __asm __volatile("\ 2366 mfmsr %0; \ 2367 mr %1, %0; \ 2368 insrdi %1,%3,1,0; \ 2369 mtmsrd %1; \ 2370 isync; \ 2371 \ 2372 tlbiel %2; \ 2373 \ 2374 mtmsrd %0; \ 2375 isync;" 2376 : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 2377 #endif 2378 } 2379 2380 EIEIO(); 2381 TLBSYNC(); 2382} 2383 2384#ifdef __powerpc64__ 2385static void 2386slbia(void) 2387{ 2388 register_t seg0; 2389 2390 __asm __volatile ("slbia"); 2391 __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0)); 2392} 2393#endif 2394 2395static int 2396moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 2397 vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags) 2398{ 2399 struct pvo_entry *pvo; 2400 uint64_t vsid; 2401 int first; 2402 u_int ptegidx; 2403 int i; 2404 int bootstrap; 2405 2406 /* 2407 * One nasty thing that can happen here is that the UMA calls to 2408 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2409 * which calls UMA... 2410 * 2411 * We break the loop by detecting recursion and allocating out of 2412 * the bootstrap pool. 2413 */ 2414 2415 moea64_pvo_enter_calls++; 2416 first = 0; 2417 bootstrap = (flags & PVO_BOOTSTRAP); 2418 2419 if (!moea64_initialized) 2420 bootstrap = 1; 2421 2422 /* 2423 * Compute the PTE Group index. 2424 */ 2425 va &= ~ADDR_POFF; 2426 vsid = va_to_vsid(pm, va); 2427 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2428 2429 /* 2430 * Remove any existing mapping for this page. Reuse the pvo entry if 2431 * there is a mapping. 2432 */ 2433 LOCK_TABLE(); 2434 2435 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2436 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2437 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2438 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == 2439 (pte_lo & LPTE_PP)) { 2440 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) { 2441 /* Re-insert if spilled */ 2442 i = moea64_pte_insert(ptegidx, 2443 &pvo->pvo_pte.lpte); 2444 if (i >= 0) 2445 PVO_PTEGIDX_SET(pvo, i); 2446 moea64_pte_overflow--; 2447 } 2448 UNLOCK_TABLE(); 2449 return (0); 2450 } 2451 moea64_pvo_remove(pvo); 2452 break; 2453 } 2454 } 2455 2456 /* 2457 * If we aren't overwriting a mapping, try to allocate. 
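 * Until moea64_init() has registered the UMA zones (or when
 * PVO_BOOTSTRAP is passed in) the entry is taken from the static
 * moea64_bpvo_pool; otherwise it comes from uma_zalloc() with M_NOWAIT,
 * and a NULL return is handed back to the caller as ENOMEM.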
2458 */ 2459 if (bootstrap) { 2460 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2461 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 2462 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2463 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2464 } 2465 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2466 moea64_bpvo_pool_index++; 2467 bootstrap = 1; 2468 } else { 2469 /* 2470 * Note: drop the table lock around the UMA allocation in 2471 * case the UMA allocator needs to manipulate the page 2472 * table. The mapping we are working with is already 2473 * protected by the PMAP lock. 2474 */ 2475 UNLOCK_TABLE(); 2476 pvo = uma_zalloc(zone, M_NOWAIT); 2477 LOCK_TABLE(); 2478 } 2479 2480 if (pvo == NULL) { 2481 UNLOCK_TABLE(); 2482 return (ENOMEM); 2483 } 2484 2485 moea64_pvo_entries++; 2486 pvo->pvo_vaddr = va; 2487 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2488 | (vsid << 16); 2489 pvo->pvo_pmap = pm; 2490 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2491 pvo->pvo_vaddr &= ~ADDR_POFF; 2492 2493 if (!(flags & VM_PROT_EXECUTE)) 2494 pte_lo |= LPTE_NOEXEC; 2495 if (flags & PVO_WIRED) 2496 pvo->pvo_vaddr |= PVO_WIRED; 2497 if (pvo_head != &moea64_pvo_kunmanaged) 2498 pvo->pvo_vaddr |= PVO_MANAGED; 2499 if (bootstrap) 2500 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2501 if (flags & PVO_FAKE) 2502 pvo->pvo_vaddr |= PVO_FAKE; 2503 if (flags & PVO_LARGE) 2504 pvo->pvo_vaddr |= PVO_LARGE; 2505 2506 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2507 (uint64_t)(pa) | pte_lo, flags); 2508 2509 /* 2510 * Remember if the list was empty and therefore will be the first 2511 * item. 2512 */ 2513 if (LIST_FIRST(pvo_head) == NULL) 2514 first = 1; 2515 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2516 2517 if (pvo->pvo_vaddr & PVO_WIRED) { 2518 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2519 pm->pm_stats.wired_count++; 2520 } 2521 pm->pm_stats.resident_count++; 2522 2523 /* 2524 * We hope this succeeds but it isn't required. 2525 */ 2526 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 2527 if (i >= 0) { 2528 PVO_PTEGIDX_SET(pvo, i); 2529 } else { 2530 panic("moea64_pvo_enter: overflow"); 2531 moea64_pte_overflow++; 2532 } 2533 2534 if (pm == kernel_pmap) 2535 isync(); 2536 2537 UNLOCK_TABLE(); 2538 2539#ifdef __powerpc64__ 2540 /* 2541 * Make sure all our bootstrap mappings are in the SLB as soon 2542 * as virtual memory is switched on. 2543 */ 2544 if (!pmap_bootstrapped) 2545 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2546#endif 2547 2548 return (first ? ENOENT : 0); 2549} 2550 2551static void 2552moea64_pvo_remove(struct pvo_entry *pvo) 2553{ 2554 struct lpte *pt; 2555 2556 /* 2557 * If there is an active pte entry, we need to deactivate it (and 2558 * save the ref & cfg bits). 2559 */ 2560 LOCK_TABLE(); 2561 pt = moea64_pvo_to_pte(pvo); 2562 if (pt != NULL) { 2563 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2564 PVO_PTEGIDX_CLR(pvo); 2565 } else { 2566 moea64_pte_overflow--; 2567 } 2568 2569 /* 2570 * Update our statistics. 2571 */ 2572 pvo->pvo_pmap->pm_stats.resident_count--; 2573 if (pvo->pvo_vaddr & PVO_WIRED) 2574 pvo->pvo_pmap->pm_stats.wired_count--; 2575 2576 /* 2577 * Save the REF/CHG bits into their cache if the page is managed. 
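 * When the mapping still had a hardware PTE, moea64_pte_unset() above
 * will have pulled its REF/CHG state back into the cached lpte; saving
 * those bits into the vm_page attribute cache keeps moea64_query_bit()
 * and moea64_clear_bit() accurate after the mapping itself is gone.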
2578 */ 2579 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2580 struct vm_page *pg; 2581 2582 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2583 if (pg != NULL) { 2584 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo & 2585 (LPTE_REF | LPTE_CHG)); 2586 } 2587 } 2588 2589 /* 2590 * Remove this PVO from the PV list. 2591 */ 2592 LIST_REMOVE(pvo, pvo_vlink); 2593 2594 /* 2595 * Remove this from the overflow list and return it to the pool 2596 * if we aren't going to reuse it. 2597 */ 2598 LIST_REMOVE(pvo, pvo_olink); 2599 UNLOCK_TABLE(); 2600 2601 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2602 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone : 2603 moea64_upvo_zone, pvo); 2604 2605 moea64_pvo_entries--; 2606 moea64_pvo_remove_calls++; 2607} 2608 2609static struct pvo_entry * 2610moea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2611{ 2612 struct pvo_entry *pvo; 2613 int ptegidx; 2614 uint64_t vsid; 2615 #ifdef __powerpc64__ 2616 struct slb slb; 2617 2618 /* The page is not mapped if the segment isn't */ 2619 if (va_to_slb_entry(pm, va, &slb) != 0) 2620 return NULL; 2621 2622 vsid = (slb.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT; 2623 if (slb.slbv & SLBV_L) 2624 va &= ~moea64_large_page_mask; 2625 else 2626 va &= ~ADDR_POFF; 2627 ptegidx = va_to_pteg(vsid, va, slb.slbv & SLBV_L); 2628 #else 2629 va &= ~ADDR_POFF; 2630 vsid = va_to_vsid(pm, va); 2631 ptegidx = va_to_pteg(vsid, va, 0); 2632 #endif 2633 2634 LOCK_TABLE(); 2635 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2636 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) 2637 break; 2638 } 2639 UNLOCK_TABLE(); 2640 2641 return (pvo); 2642} 2643 2644static struct lpte * 2645moea64_pvo_to_pte(const struct pvo_entry *pvo) 2646{ 2647 struct lpte *pt; 2648 int pteidx, ptegidx; 2649 uint64_t vsid; 2650 2651 ASSERT_TABLE_LOCK(); 2652 2653 /* If the PTEG index is not set, then there is no page table entry */ 2654 if (!PVO_PTEGIDX_ISSET(pvo)) 2655 return (NULL); 2656 2657 /* 2658 * Calculate the ptegidx 2659 */ 2660 vsid = PVO_VSID(pvo); 2661 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 2662 pvo->pvo_vaddr & PVO_LARGE); 2663 2664 /* 2665 * We can find the actual pte entry without searching by grabbing 2666 * the PTEG index from 3 unused bits in pvo_vaddr and by 2667 * noticing the HID bit. 
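 *
 * Each PTEG holds eight PTEs, so the flat index is simply
 * (ptegidx << 3) | slot, where the slot is the value stashed by
 * PVO_PTEGIDX_SET() when the PTE was inserted.  If LPTE_HID is set the
 * entry lives in the secondary group, so ptegidx is XORed with
 * moea64_pteg_mask first.  For example, group 5 and slot 2 give index
 * 42, i.e. moea64_pteg_table[5].pt[2].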
2668 */ 2669 if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID) 2670 ptegidx ^= moea64_pteg_mask; 2671 2672 pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo); 2673 2674 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 2675 !PVO_PTEGIDX_ISSET(pvo)) { 2676 panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no " 2677 "valid pte index", pvo); 2678 } 2679 2680 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 && 2681 PVO_PTEGIDX_ISSET(pvo)) { 2682 panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo " 2683 "pvo but no valid pte", pvo); 2684 } 2685 2686 pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2687 if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) == 2688 LPTE_VALID) { 2689 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) { 2690 panic("moea64_pvo_to_pte: pvo %p has valid pte in " 2691 "moea64_pteg_table %p but invalid in pvo", pvo, pt); 2692 } 2693 2694 if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) & 2695 ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) { 2696 panic("moea64_pvo_to_pte: pvo %p pte does not match " 2697 "pte %p in moea64_pteg_table difference is %#x", 2698 pvo, pt, 2699 (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo)); 2700 } 2701 2702 return (pt); 2703 } 2704 2705 if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) { 2706 panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in " 2707 "moea64_pteg_table but valid in pvo", pvo, pt); 2708 } 2709 2710 return (NULL); 2711} 2712 2713static __inline int 2714moea64_pte_spillable_ident(u_int ptegidx) 2715{ 2716 struct lpte *pt; 2717 int i, j, k; 2718 2719 /* Start at a random slot */ 2720 i = mftb() % 8; 2721 k = -1; 2722 for (j = 0; j < 8; j++) { 2723 pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8]; 2724 if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED)) 2725 continue; 2726 2727 /* This is a candidate, so remember it */ 2728 k = (i + j) % 8; 2729 2730 /* Try to get a page that has not been used lately */ 2731 if (!(pt->pte_lo & LPTE_REF)) 2732 return (k); 2733 } 2734 2735 return (k); 2736} 2737 2738static int 2739moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt) 2740{ 2741 struct lpte *pt; 2742 struct pvo_entry *pvo; 2743 u_int pteg_bktidx; 2744 int i; 2745 2746 ASSERT_TABLE_LOCK(); 2747 2748 /* 2749 * First try primary hash. 2750 */ 2751 pteg_bktidx = ptegidx; 2752 for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) { 2753 if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) { 2754 pvo_pt->pte_hi &= ~LPTE_HID; 2755 moea64_pte_set(pt, pvo_pt); 2756 return (i); 2757 } 2758 } 2759 2760 /* 2761 * Now try secondary hash. 2762 */ 2763 pteg_bktidx ^= moea64_pteg_mask; 2764 for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) { 2765 if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) { 2766 pvo_pt->pte_hi |= LPTE_HID; 2767 moea64_pte_set(pt, pvo_pt); 2768 return (i); 2769 } 2770 } 2771 2772 /* 2773 * Out of luck. Find a PTE to sacrifice. 2774 */ 2775 pteg_bktidx = ptegidx; 2776 i = moea64_pte_spillable_ident(pteg_bktidx); 2777 if (i < 0) { 2778 pteg_bktidx ^= moea64_pteg_mask; 2779 i = moea64_pte_spillable_ident(pteg_bktidx); 2780 } 2781 2782 if (i < 0) { 2783 /* No freeable slots in either PTEG? We're hosed. */ 2784 panic("moea64_pte_insert: overflow"); 2785 return (-1); 2786 } 2787 2788 if (pteg_bktidx == ptegidx) 2789 pvo_pt->pte_hi &= ~LPTE_HID; 2790 else 2791 pvo_pt->pte_hi |= LPTE_HID; 2792 2793 /* 2794 * Synchronize the sacrifice PTE with its PVO, then mark both 2795 * invalid. The PVO will be reused when/if the VM system comes 2796 * here after a fault. 
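 *
 * The victim's PVO is located by matching pte_hi in the overflow list
 * of its bucket (indexed by the primary hash, hence the XOR below when
 * the victim carries LPTE_HID).  Unsetting the PTE lets its REF/CHG
 * bits survive in the PVO's cached copy, and moea64_pvo_enter()
 * re-inserts spilled entries when the same mapping is established
 * again.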
2797 */ 2798 pt = &moea64_pteg_table[pteg_bktidx].pt[i]; 2799 2800 if (pt->pte_hi & LPTE_HID) 2801 pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */ 2802 2803 LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) { 2804 if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) { 2805 KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID, 2806 ("Invalid PVO for valid PTE!")); 2807 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2808 PVO_PTEGIDX_CLR(pvo); 2809 moea64_pte_overflow++; 2810 break; 2811 } 2812 } 2813 2814 KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi, 2815 ("Unable to find PVO for spilled PTE")); 2816 2817 /* 2818 * Set the new PTE. 2819 */ 2820 moea64_pte_set(pt, pvo_pt); 2821 2822 return (i); 2823} 2824 2825static boolean_t 2826moea64_query_bit(vm_page_t m, u_int64_t ptebit) 2827{ 2828 struct pvo_entry *pvo; 2829 struct lpte *pt; 2830 2831 if (moea64_attr_fetch(m) & ptebit) 2832 return (TRUE); 2833 2834 vm_page_lock_queues(); 2835 2836 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2837 MOEA_PVO_CHECK(pvo); /* sanity check */ 2838 2839 /* 2840 * See if we saved the bit off. If so, cache it and return 2841 * success. 2842 */ 2843 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2844 moea64_attr_save(m, ptebit); 2845 MOEA_PVO_CHECK(pvo); /* sanity check */ 2846 vm_page_unlock_queues(); 2847 return (TRUE); 2848 } 2849 } 2850 2851 /* 2852 * No luck, now go through the hard part of looking at the PTEs 2853 * themselves. Sync so that any pending REF/CHG bits are flushed to 2854 * the PTEs. 2855 */ 2856 SYNC(); 2857 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2858 MOEA_PVO_CHECK(pvo); /* sanity check */ 2859 2860 /* 2861 * See if this pvo has a valid PTE. if so, fetch the 2862 * REF/CHG bits from the valid PTE. If the appropriate 2863 * ptebit is set, cache it and return success. 2864 */ 2865 LOCK_TABLE(); 2866 pt = moea64_pvo_to_pte(pvo); 2867 if (pt != NULL) { 2868 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2869 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2870 UNLOCK_TABLE(); 2871 2872 moea64_attr_save(m, ptebit); 2873 MOEA_PVO_CHECK(pvo); /* sanity check */ 2874 vm_page_unlock_queues(); 2875 return (TRUE); 2876 } 2877 } 2878 UNLOCK_TABLE(); 2879 } 2880 2881 vm_page_unlock_queues(); 2882 return (FALSE); 2883} 2884 2885static u_int 2886moea64_clear_bit(vm_page_t m, u_int64_t ptebit) 2887{ 2888 u_int count; 2889 struct pvo_entry *pvo; 2890 struct lpte *pt; 2891 2892 vm_page_lock_queues(); 2893 2894 /* 2895 * Clear the cached value. 2896 */ 2897 moea64_attr_clear(m, ptebit); 2898 2899 /* 2900 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2901 * we can reset the right ones). note that since the pvo entries and 2902 * list heads are accessed via BAT0 and are never placed in the page 2903 * table, we don't have to worry about further accesses setting the 2904 * REF/CHG bits. 2905 */ 2906 SYNC(); 2907 2908 /* 2909 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2910 * valid pte clear the ptebit from the valid pte. 
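 * The returned count is the number of mappings whose hardware PTE still
 * had the bit set, which is what moea64_ts_referenced() returns after
 * clearing LPTE_REF.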
2911 */ 2912 count = 0; 2913 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2914 MOEA_PVO_CHECK(pvo); /* sanity check */ 2915 2916 LOCK_TABLE(); 2917 pt = moea64_pvo_to_pte(pvo); 2918 if (pt != NULL) { 2919 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2920 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2921 count++; 2922 moea64_pte_clear(pt, pvo->pvo_vpn, ptebit); 2923 } 2924 } 2925 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2926 MOEA_PVO_CHECK(pvo); /* sanity check */ 2927 UNLOCK_TABLE(); 2928 } 2929 2930 vm_page_unlock_queues(); 2931 return (count); 2932} 2933 2934boolean_t 2935moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2936{ 2937 struct pvo_entry *pvo; 2938 vm_offset_t ppa; 2939 int error = 0; 2940 2941 PMAP_LOCK(kernel_pmap); 2942 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2943 pvo = moea64_pvo_find_va(kernel_pmap, ppa); 2944 if (pvo == NULL || 2945 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2946 error = EFAULT; 2947 break; 2948 } 2949 } 2950 PMAP_UNLOCK(kernel_pmap); 2951 2952 return (error); 2953} 2954 2955/* 2956 * Map a set of physical memory pages into the kernel virtual 2957 * address space. Return a pointer to where it is mapped. This 2958 * routine is intended to be used for mapping device memory, 2959 * NOT real memory. 2960 */ 2961void * 2962moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2963{ 2964 vm_offset_t va, tmpva, ppa, offset; 2965 2966 ppa = trunc_page(pa); 2967 offset = pa & PAGE_MASK; 2968 size = roundup(offset + size, PAGE_SIZE); 2969 2970 va = kmem_alloc_nofault(kernel_map, size); 2971 2972 if (!va) 2973 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2974 2975 for (tmpva = va; size > 0;) { 2976 moea64_kenter(mmu, tmpva, ppa); 2977 size -= PAGE_SIZE; 2978 tmpva += PAGE_SIZE; 2979 ppa += PAGE_SIZE; 2980 } 2981 2982 return ((void *)(va + offset)); 2983} 2984 2985void 2986moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2987{ 2988 vm_offset_t base, offset; 2989 2990 base = trunc_page(va); 2991 offset = va & PAGE_MASK; 2992 size = roundup(offset + size, PAGE_SIZE); 2993 2994 kmem_free(kernel_map, base, size); 2995} 2996 2997static void 2998moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2999{ 3000 struct pvo_entry *pvo; 3001 vm_offset_t lim; 3002 vm_paddr_t pa; 3003 vm_size_t len; 3004 3005 PMAP_LOCK(pm); 3006 while (sz > 0) { 3007 lim = round_page(va); 3008 len = MIN(lim - va, sz); 3009 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 3010 if (pvo != NULL) { 3011 pa = (pvo->pvo_pte.pte.pte_lo & LPTE_RPGN) | 3012 (va & ADDR_POFF); 3013 moea64_syncicache(pm, va, pa, len); 3014 } 3015 va += len; 3016 sz -= len; 3017 } 3018 PMAP_UNLOCK(pm); 3019} 3020