mmu_oea64.c revision 212331
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 212331 2010-09-08 19:28:43Z nwhitehorn $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_kstack_pages.h" 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <sys/kdb.h> 131 132#include <dev/ofw/openfirm.h> 133 134#include <vm/vm.h> 135#include <vm/vm_param.h> 136#include <vm/vm_kern.h> 137#include <vm/vm_page.h> 138#include <vm/vm_map.h> 139#include <vm/vm_object.h> 140#include <vm/vm_extern.h> 141#include <vm/vm_pageout.h> 142#include <vm/vm_pager.h> 143#include <vm/uma.h> 144 145#include <machine/_inttypes.h> 146#include <machine/cpu.h> 147#include <machine/platform.h> 148#include <machine/frame.h> 149#include <machine/md_var.h> 150#include <machine/psl.h> 151#include <machine/bat.h> 152#include <machine/hid.h> 153#include <machine/pte.h> 154#include <machine/sr.h> 155#include <machine/trap.h> 156#include <machine/mmuvar.h> 157 158#include "mmu_if.h" 159 160#define MOEA_DEBUG 161 162#define TODO panic("%s: not implemented", __func__); 163void moea64_release_vsid(uint64_t vsid); 164uintptr_t moea64_get_unique_vsid(void); 165 166static __inline register_t 167cntlzd(volatile register_t a) { 168 register_t b; 169 __asm ("cntlzd %0, %1" : "=r"(b) : "r"(a)); 170 return b; 171} 172 173#define PTESYNC() __asm __volatile("ptesync"); 174#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 175#define SYNC() __asm __volatile("sync"); 176#define EIEIO() __asm __volatile("eieio"); 177 178/* 179 * The tlbie instruction must be executed in 64-bit mode 180 * so we have to twiddle MSR[SF] around every invocation. 181 * Just to add to the fun, exceptions must be off as well 182 * so that we can't trap in 64-bit mode. What a pain. 
183 */ 184struct mtx tlbie_mutex; 185 186static __inline void 187TLBIE(uint64_t vpn) { 188#ifndef __powerpc64__ 189 register_t vpn_hi, vpn_lo; 190 register_t msr; 191 register_t scratch; 192#endif 193 194 vpn <<= ADDR_PIDX_SHFT; 195 vpn &= ~(0xffffULL << 48); 196 197 mtx_lock_spin(&tlbie_mutex); 198#ifdef __powerpc64__ 199 __asm __volatile("\ 200 ptesync; \ 201 tlbie %0; \ 202 eieio; \ 203 tlbsync; \ 204 ptesync;" 205 :: "r"(vpn) : "memory"); 206#else 207 vpn_hi = (uint32_t)(vpn >> 32); 208 vpn_lo = (uint32_t)vpn; 209 210 __asm __volatile("\ 211 mfmsr %0; \ 212 mr %1, %0; \ 213 insrdi %1,%5,1,0; \ 214 mtmsrd %1; \ 215 ptesync; \ 216 \ 217 sld %1,%2,%4; \ 218 or %1,%1,%3; \ 219 tlbie %1; \ 220 \ 221 mtmsrd %0; \ 222 eieio; \ 223 tlbsync; \ 224 ptesync;" 225 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 226 : "memory"); 227#endif 228 mtx_unlock_spin(&tlbie_mutex); 229} 230 231#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 232#define ENABLE_TRANS(msr) mtmsr(msr); isync() 233 234#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 235#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 236#define VSID_HASH_MASK 0x0000007fffffffffULL 237 238#define PVO_PTEGIDX_MASK 0x007UL /* which PTEG slot */ 239#define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */ 240#define PVO_WIRED 0x010UL /* PVO entry is wired */ 241#define PVO_MANAGED 0x020UL /* PVO entry is managed */ 242#define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during 243 bootstrap */ 244#define PVO_FAKE 0x100UL /* fictitious phys page */ 245#define PVO_LARGE 0x200UL /* large page */ 246#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 247#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 248#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 249#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 250#define PVO_PTEGIDX_CLR(pvo) \ 251 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 252#define PVO_PTEGIDX_SET(pvo, i) \ 253 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 254#define PVO_VSID(pvo) ((pvo)->pvo_vpn >> 16) 255 256#define MOEA_PVO_CHECK(pvo) 257 258#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 259#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 260#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 261 262struct ofw_map { 263 cell_t om_va; 264 cell_t om_len; 265 cell_t om_pa_hi; 266 cell_t om_pa_lo; 267 cell_t om_mode; 268}; 269 270/* 271 * Map of physical memory regions. 272 */ 273static struct mem_region *regions; 274static struct mem_region *pregions; 275static u_int phys_avail_count; 276static int regions_sz, pregions_sz; 277extern int ofw_real_mode; 278 279extern struct pmap ofw_pmap; 280 281extern void bs_remap_earlyboot(void); 282 283 284/* 285 * Lock for the pteg and pvo tables. 286 */ 287struct mtx moea64_table_mutex; 288struct mtx moea64_slb_mutex; 289 290/* 291 * PTEG data. 292 */ 293static struct lpteg *moea64_pteg_table; 294u_int moea64_pteg_count; 295u_int moea64_pteg_mask; 296 297/* 298 * PVO data. 
299 */ 300struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 301/* lists of unmanaged pages */ 302struct pvo_head moea64_pvo_kunmanaged = 303 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 304struct pvo_head moea64_pvo_unmanaged = 305 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 306 307uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 308uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 309 310#define BPVO_POOL_SIZE 327680 311static struct pvo_entry *moea64_bpvo_pool; 312static int moea64_bpvo_pool_index = 0; 313 314#define VSID_NBPW (sizeof(u_int32_t) * 8) 315#ifdef __powerpc64__ 316#define NVSIDS (NPMAPS * 16) 317#define VSID_HASHMASK 0xffffffffUL 318#else 319#define NVSIDS NPMAPS 320#define VSID_HASHMASK 0xfffffUL 321#endif 322static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 323 324static boolean_t moea64_initialized = FALSE; 325 326/* 327 * Statistics. 328 */ 329u_int moea64_pte_valid = 0; 330u_int moea64_pte_overflow = 0; 331u_int moea64_pvo_entries = 0; 332u_int moea64_pvo_enter_calls = 0; 333u_int moea64_pvo_remove_calls = 0; 334SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 335 &moea64_pte_valid, 0, ""); 336SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 337 &moea64_pte_overflow, 0, ""); 338SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 339 &moea64_pvo_entries, 0, ""); 340SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 341 &moea64_pvo_enter_calls, 0, ""); 342SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 343 &moea64_pvo_remove_calls, 0, ""); 344 345vm_offset_t moea64_scratchpage_va[2]; 346uint64_t moea64_scratchpage_vpn[2]; 347struct lpte *moea64_scratchpage_pte[2]; 348struct mtx moea64_scratchpage_mtx; 349 350uint64_t moea64_large_page_mask = 0; 351int moea64_large_page_size = 0; 352int moea64_large_page_shift = 0; 353 354/* 355 * Allocate physical memory for use in moea64_bootstrap. 356 */ 357static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 358 359/* 360 * PTE calls. 361 */ 362static int moea64_pte_insert(u_int, struct lpte *); 363 364/* 365 * PVO calls. 366 */ 367static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 368 vm_offset_t, vm_offset_t, uint64_t, int); 369static void moea64_pvo_remove(struct pvo_entry *); 370static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 371static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *); 372 373/* 374 * Utility routines. 
375 */ 376static void moea64_bootstrap(mmu_t mmup, 377 vm_offset_t kernelstart, vm_offset_t kernelend); 378static void moea64_cpu_bootstrap(mmu_t, int ap); 379static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 380 vm_prot_t, boolean_t); 381static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 382static u_int moea64_clear_bit(vm_page_t, u_int64_t); 383static void moea64_kremove(mmu_t, vm_offset_t); 384static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 385 vm_offset_t pa, vm_size_t sz); 386static void tlbia(void); 387#ifdef __powerpc64__ 388static void slbia(void); 389#endif 390 391/* 392 * Kernel MMU interface 393 */ 394void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 395void moea64_clear_modify(mmu_t, vm_page_t); 396void moea64_clear_reference(mmu_t, vm_page_t); 397void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 398void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 399void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 400 vm_prot_t); 401void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 402vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 403vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 404void moea64_init(mmu_t); 405boolean_t moea64_is_modified(mmu_t, vm_page_t); 406boolean_t moea64_is_referenced(mmu_t, vm_page_t); 407boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 408vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 409boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 410int moea64_page_wired_mappings(mmu_t, vm_page_t); 411void moea64_pinit(mmu_t, pmap_t); 412void moea64_pinit0(mmu_t, pmap_t); 413void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 414void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 415void moea64_qremove(mmu_t, vm_offset_t, int); 416void moea64_release(mmu_t, pmap_t); 417void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 418void moea64_remove_all(mmu_t, vm_page_t); 419void moea64_remove_write(mmu_t, vm_page_t); 420void moea64_zero_page(mmu_t, vm_page_t); 421void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 422void moea64_zero_page_idle(mmu_t, vm_page_t); 423void moea64_activate(mmu_t, struct thread *); 424void moea64_deactivate(mmu_t, struct thread *); 425void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 426void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 427vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 428void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 429boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 430static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 431 432static mmu_method_t moea64_methods[] = { 433 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 434 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 435 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 436 MMUMETHOD(mmu_copy_page, moea64_copy_page), 437 MMUMETHOD(mmu_enter, moea64_enter), 438 MMUMETHOD(mmu_enter_object, moea64_enter_object), 439 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 440 MMUMETHOD(mmu_extract, moea64_extract), 441 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 442 MMUMETHOD(mmu_init, moea64_init), 443 MMUMETHOD(mmu_is_modified, moea64_is_modified), 444 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 445 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 446 MMUMETHOD(mmu_map, moea64_map), 447 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 448 
MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 449 MMUMETHOD(mmu_pinit, moea64_pinit), 450 MMUMETHOD(mmu_pinit0, moea64_pinit0), 451 MMUMETHOD(mmu_protect, moea64_protect), 452 MMUMETHOD(mmu_qenter, moea64_qenter), 453 MMUMETHOD(mmu_qremove, moea64_qremove), 454 MMUMETHOD(mmu_release, moea64_release), 455 MMUMETHOD(mmu_remove, moea64_remove), 456 MMUMETHOD(mmu_remove_all, moea64_remove_all), 457 MMUMETHOD(mmu_remove_write, moea64_remove_write), 458 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 459 MMUMETHOD(mmu_zero_page, moea64_zero_page), 460 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 461 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 462 MMUMETHOD(mmu_activate, moea64_activate), 463 MMUMETHOD(mmu_deactivate, moea64_deactivate), 464 465 /* Internal interfaces */ 466 MMUMETHOD(mmu_bootstrap, moea64_bootstrap), 467 MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap), 468 MMUMETHOD(mmu_mapdev, moea64_mapdev), 469 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 470 MMUMETHOD(mmu_kextract, moea64_kextract), 471 MMUMETHOD(mmu_kenter, moea64_kenter), 472 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 473 474 { 0, 0 } 475}; 476 477static mmu_def_t oea64_mmu = { 478 MMU_TYPE_G5, 479 moea64_methods, 480 0 481}; 482MMU_DEF(oea64_mmu); 483 484static __inline u_int 485va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 486{ 487 uint64_t hash; 488 int shift; 489 490 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT; 491 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 492 shift); 493 return (hash & moea64_pteg_mask); 494} 495 496static __inline struct pvo_head * 497pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 498{ 499 struct vm_page *pg; 500 501 pg = PHYS_TO_VM_PAGE(pa); 502 503 if (pg_p != NULL) 504 *pg_p = pg; 505 506 if (pg == NULL) 507 return (&moea64_pvo_unmanaged); 508 509 return (&pg->md.mdpg_pvoh); 510} 511 512static __inline struct pvo_head * 513vm_page_to_pvoh(vm_page_t m) 514{ 515 516 return (&m->md.mdpg_pvoh); 517} 518 519static __inline void 520moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 521{ 522 523 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 524 m->md.mdpg_attrs &= ~ptebit; 525} 526 527static __inline u_int64_t 528moea64_attr_fetch(vm_page_t m) 529{ 530 531 return (m->md.mdpg_attrs); 532} 533 534static __inline void 535moea64_attr_save(vm_page_t m, u_int64_t ptebit) 536{ 537 538 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 539 m->md.mdpg_attrs |= ptebit; 540} 541 542static __inline void 543moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 544 uint64_t pte_lo, int flags) 545{ 546 547 ASSERT_TABLE_LOCK(); 548 549 /* 550 * Construct a PTE. Default to IMB initially. Valid bit only gets 551 * set when the real pte is set in memory. 552 * 553 * Note: Don't set the valid bit for correct operation of tlb update. 
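	 *
	 * (Added note, inferred from moea64_pte_set() below: the hardware may
	 * walk the PTEG at any time, so an entry only becomes visible once
	 * pte_lo has been written, an eieio issued, and pte_hi stored last
	 * with LPTE_VALID set.)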
554 */ 555 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 556 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 557 558 if (flags & PVO_LARGE) 559 pt->pte_hi |= LPTE_BIG; 560 561 pt->pte_lo = pte_lo; 562} 563 564static __inline void 565moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 566{ 567 568 ASSERT_TABLE_LOCK(); 569 570 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 571} 572 573static __inline void 574moea64_pte_clear(struct lpte *pt, uint64_t vpn, u_int64_t ptebit) 575{ 576 ASSERT_TABLE_LOCK(); 577 578 /* 579 * As shown in Section 7.6.3.2.3 580 */ 581 pt->pte_lo &= ~ptebit; 582 TLBIE(vpn); 583} 584 585static __inline void 586moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 587{ 588 589 ASSERT_TABLE_LOCK(); 590 pvo_pt->pte_hi |= LPTE_VALID; 591 592 /* 593 * Update the PTE as defined in section 7.6.3.1. 594 * Note that the REF/CHG bits are from pvo_pt and thus should have 595 * been saved so this routine can restore them (if desired). 596 */ 597 pt->pte_lo = pvo_pt->pte_lo; 598 EIEIO(); 599 pt->pte_hi = pvo_pt->pte_hi; 600 PTESYNC(); 601 moea64_pte_valid++; 602} 603 604static __inline void 605moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn) 606{ 607 ASSERT_TABLE_LOCK(); 608 pvo_pt->pte_hi &= ~LPTE_VALID; 609 610 /* 611 * Force the reg & chg bits back into the PTEs. 612 */ 613 SYNC(); 614 615 /* 616 * Invalidate the pte. 617 */ 618 pt->pte_hi &= ~LPTE_VALID; 619 TLBIE(vpn); 620 621 /* 622 * Save the reg & chg bits. 623 */ 624 moea64_pte_synch(pt, pvo_pt); 625 moea64_pte_valid--; 626} 627 628static __inline void 629moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn) 630{ 631 632 /* 633 * Invalidate the PTE 634 */ 635 moea64_pte_unset(pt, pvo_pt, vpn); 636 moea64_pte_set(pt, pvo_pt); 637} 638 639static __inline uint64_t 640moea64_calc_wimg(vm_offset_t pa) 641{ 642 uint64_t pte_lo; 643 int i; 644 645 /* 646 * Assume the page is cache inhibited and access is guarded unless 647 * it's in our available memory array. 648 */ 649 pte_lo = LPTE_I | LPTE_G; 650 for (i = 0; i < pregions_sz; i++) { 651 if ((pa >= pregions[i].mr_start) && 652 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 653 pte_lo &= ~(LPTE_I | LPTE_G); 654 pte_lo |= LPTE_M; 655 break; 656 } 657 } 658 659 return pte_lo; 660} 661 662/* 663 * Quick sort callout for comparing memory regions. 
664 */ 665static int mr_cmp(const void *a, const void *b); 666static int om_cmp(const void *a, const void *b); 667 668static int 669mr_cmp(const void *a, const void *b) 670{ 671 const struct mem_region *regiona; 672 const struct mem_region *regionb; 673 674 regiona = a; 675 regionb = b; 676 if (regiona->mr_start < regionb->mr_start) 677 return (-1); 678 else if (regiona->mr_start > regionb->mr_start) 679 return (1); 680 else 681 return (0); 682} 683 684static int 685om_cmp(const void *a, const void *b) 686{ 687 const struct ofw_map *mapa; 688 const struct ofw_map *mapb; 689 690 mapa = a; 691 mapb = b; 692 if (mapa->om_pa_hi < mapb->om_pa_hi) 693 return (-1); 694 else if (mapa->om_pa_hi > mapb->om_pa_hi) 695 return (1); 696 else if (mapa->om_pa_lo < mapb->om_pa_lo) 697 return (-1); 698 else if (mapa->om_pa_lo > mapb->om_pa_lo) 699 return (1); 700 else 701 return (0); 702} 703 704static void 705moea64_cpu_bootstrap(mmu_t mmup, int ap) 706{ 707 int i = 0; 708 #ifdef __powerpc64__ 709 struct slb *slb = PCPU_GET(slb); 710 #endif 711 712 /* 713 * Initialize segment registers and MMU 714 */ 715 716 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 717 718 /* 719 * Install kernel SLB entries 720 */ 721 722 #ifdef __powerpc64__ 723 slbia(); 724 725 for (i = 0; i < 64; i++) { 726 if (!(slb[i].slbe & SLBE_VALID)) 727 continue; 728 729 __asm __volatile ("slbmte %0, %1" :: 730 "r"(slb[i].slbv), "r"(slb[i].slbe)); 731 } 732 #else 733 for (i = 0; i < 16; i++) 734 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 735 #endif 736 737 /* 738 * Install page table 739 */ 740 741 __asm __volatile ("ptesync; mtsdr1 %0; isync" 742 :: "r"((uintptr_t)moea64_pteg_table 743 | (64 - cntlzd(moea64_pteg_mask >> 11)))); 744 tlbia(); 745} 746 747static void 748moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 749{ 750 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 751 register_t msr; 752 vm_offset_t off; 753 vm_paddr_t pa_base; 754 int i, ofw_mappings; 755 756 bzero(translations, sz); 757 if (OF_getprop(mmu, "translations", translations, sz) == -1) 758 panic("moea64_bootstrap: can't get ofw translations"); 759 760 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 761 sz /= sizeof(*translations); 762 qsort(translations, sz, sizeof (*translations), om_cmp); 763 764 for (i = 0, ofw_mappings = 0; i < sz; i++) { 765 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 766 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 767 translations[i].om_len); 768 769 if (translations[i].om_pa_lo % PAGE_SIZE) 770 panic("OFW translation not page-aligned!"); 771 772 pa_base = translations[i].om_pa_lo; 773 774 #ifdef __powerpc64__ 775 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32; 776 #else 777 if (translations[i].om_pa_hi) 778 panic("OFW translations above 32-bit boundary!"); 779 #endif 780 781 /* Now enter the pages for this mapping */ 782 783 DISABLE_TRANS(msr); 784 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 785 if (moea64_pvo_find_va(kernel_pmap, 786 translations[i].om_va + off) != NULL) 787 continue; 788 789 moea64_kenter(mmup, translations[i].om_va + off, 790 pa_base + off); 791 792 ofw_mappings++; 793 } 794 ENABLE_TRANS(msr); 795 } 796} 797 798#ifdef __powerpc64__ 799static void 800moea64_probe_large_page(void) 801{ 802 uint16_t pvr = mfpvr() >> 16; 803 804 switch (pvr) { 805 case IBM970: 806 case IBM970FX: 807 case IBM970MP: 808 powerpc_sync(); isync(); 809 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 810 powerpc_sync(); isync(); 811 812 /* FALLTHROUGH */ 
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert(kernel_pmap, cache, &entry);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
				    &moea64_pvo_kunmanaged, pa, pa,
				    pte_lo, PVO_WIRED | PVO_LARGE |
				    VM_PROT_EXECUTE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
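		 *
		 * (Cross-reference: physical memory below EXC_LAST was already
		 * removed from phys_avail[] in moea64_bootstrap(), so the
		 * vector area is never handed out as ordinary RAM either.)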
911 */ 912 913 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; 914 pa += PAGE_SIZE) 915 moea64_kenter(mmup, pa, pa); 916 } 917 ENABLE_TRANS(msr); 918} 919 920static void 921moea64_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 922{ 923 ihandle_t mmui; 924 phandle_t chosen; 925 phandle_t mmu; 926 size_t sz; 927 int i, j; 928 vm_size_t size, physsz, hwphyssz; 929 vm_offset_t pa, va; 930 register_t msr; 931 void *dpcpu; 932 933#ifndef __powerpc64__ 934 /* We don't have a direct map since there is no BAT */ 935 hw_direct_map = 0; 936 937 /* Make sure battable is zero, since we have no BAT */ 938 for (i = 0; i < 16; i++) { 939 battable[i].batu = 0; 940 battable[i].batl = 0; 941 } 942#else 943 moea64_probe_large_page(); 944 945 /* Use a direct map if we have large page support */ 946 if (moea64_large_page_size > 0) 947 hw_direct_map = 1; 948 else 949 hw_direct_map = 0; 950#endif 951 952 /* Get physical memory regions from firmware */ 953 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 954 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 955 956 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 957 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 958 panic("moea64_bootstrap: phys_avail too small"); 959 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 960 phys_avail_count = 0; 961 physsz = 0; 962 hwphyssz = 0; 963 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 964 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 965 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 966 regions[i].mr_start + regions[i].mr_size, 967 regions[i].mr_size); 968 if (hwphyssz != 0 && 969 (physsz + regions[i].mr_size) >= hwphyssz) { 970 if (physsz < hwphyssz) { 971 phys_avail[j] = regions[i].mr_start; 972 phys_avail[j + 1] = regions[i].mr_start + 973 hwphyssz - physsz; 974 physsz = hwphyssz; 975 phys_avail_count++; 976 } 977 break; 978 } 979 phys_avail[j] = regions[i].mr_start; 980 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 981 phys_avail_count++; 982 physsz += regions[i].mr_size; 983 } 984 985 /* Check for overlap with the kernel and exception vectors */ 986 for (j = 0; j < 2*phys_avail_count; j+=2) { 987 if (phys_avail[j] < EXC_LAST) 988 phys_avail[j] += EXC_LAST; 989 990 if (kernelstart >= phys_avail[j] && 991 kernelstart < phys_avail[j+1]) { 992 if (kernelend < phys_avail[j+1]) { 993 phys_avail[2*phys_avail_count] = 994 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 995 phys_avail[2*phys_avail_count + 1] = 996 phys_avail[j+1]; 997 phys_avail_count++; 998 } 999 1000 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 1001 } 1002 1003 if (kernelend >= phys_avail[j] && 1004 kernelend < phys_avail[j+1]) { 1005 if (kernelstart > phys_avail[j]) { 1006 phys_avail[2*phys_avail_count] = phys_avail[j]; 1007 phys_avail[2*phys_avail_count + 1] = 1008 kernelstart & ~PAGE_MASK; 1009 phys_avail_count++; 1010 } 1011 1012 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 1013 } 1014 } 1015 1016 physmem = btoc(physsz); 1017 1018 /* 1019 * Allocate PTEG table. 1020 */ 1021#ifdef PTEGCOUNT 1022 moea64_pteg_count = PTEGCOUNT; 1023#else 1024 moea64_pteg_count = 0x1000; 1025 1026 while (moea64_pteg_count < physmem) 1027 moea64_pteg_count <<= 1; 1028 1029 moea64_pteg_count >>= 1; 1030#endif /* PTEGCOUNT */ 1031 1032 size = moea64_pteg_count * sizeof(struct lpteg); 1033 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 1034 moea64_pteg_count, size); 1035 1036 /* 1037 * We now need to allocate memory. 
	 * The memory we are about to allocate holds the page table itself,
	 * so it cannot be reached through translation until that table
	 * exists. With no BATs to fall back on, we drop to data real mode
	 * for a minute as a measure of last resort. We do this a couple of
	 * times.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	moea64_pteg_mask = moea64_pteg_count - 1;

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
	 */
	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

	/*
	 * Initialize the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	kernel_pmap->pm_active = ~0;

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	if (!ofw_real_mode) {
	    #ifndef __powerpc64__
		moea64_pinit(mmup, &ofw_pmap);

		for (i = 0; i < 16; i++)
			ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];
	    #endif

		if ((chosen = OF_finddevice("/chosen")) == -1)
			panic("moea64_bootstrap: can't find /chosen");
		OF_getprop(chosen, "mmu", &mmui, 4);

		if ((mmu = OF_instance_to_package(mmui)) == -1)
			panic("moea64_bootstrap: can't get mmu package");
		if ((sz = OF_getproplen(mmu, "translations")) == -1)
			panic("moea64_bootstrap: can't get ofw translation count");
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		moea64_add_ofw_mappings(mmup, mmu, sz);
	}

#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Calculate the last available physical address.
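	 *
	 * (Layout note: phys_avail[] holds (start, end) pairs of usable
	 * physical memory terminated by a zeroed pair, so the loop below
	 * walks to the last pair and Maxmem becomes the page number of its
	 * end address.)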
1152 */ 1153 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1154 ; 1155 Maxmem = powerpc_btop(phys_avail[i + 1]); 1156 1157 /* 1158 * Initialize MMU and remap early physical mappings 1159 */ 1160 moea64_cpu_bootstrap(mmup,0); 1161 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 1162 pmap_bootstrapped++; 1163 bs_remap_earlyboot(); 1164 1165 /* 1166 * Set the start and end of kva. 1167 */ 1168 virtual_avail = VM_MIN_KERNEL_ADDRESS; 1169 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 1170 1171 /* 1172 * Map the entire KVA range into the SLB. We must not fault there. 1173 */ 1174 #ifdef __powerpc64__ 1175 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 1176 moea64_bootstrap_slb_prefault(va, 0); 1177 #endif 1178 1179 /* 1180 * Figure out how far we can extend virtual_end into segment 16 1181 * without running into existing mappings. Segment 16 is guaranteed 1182 * to contain neither RAM nor devices (at least on Apple hardware), 1183 * but will generally contain some OFW mappings we should not 1184 * step on. 1185 */ 1186 1187 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 1188 PMAP_LOCK(kernel_pmap); 1189 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 1190 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 1191 virtual_end += PAGE_SIZE; 1192 PMAP_UNLOCK(kernel_pmap); 1193 #endif 1194 1195 /* 1196 * Allocate some things for page zeroing. We put this directly 1197 * in the page table, marked with LPTE_LOCKED, to avoid any 1198 * of the PVO book-keeping or other parts of the VM system 1199 * from even knowing that this hack exists. 1200 */ 1201 1202 if (!hw_direct_map) { 1203 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 1204 MTX_DEF); 1205 for (i = 0; i < 2; i++) { 1206 struct lpte pt; 1207 uint64_t vsid; 1208 int pteidx, ptegidx; 1209 1210 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 1211 virtual_end -= PAGE_SIZE; 1212 1213 LOCK_TABLE(); 1214 1215 vsid = va_to_vsid(kernel_pmap, 1216 moea64_scratchpage_va[i]); 1217 moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i], 1218 LPTE_NOEXEC, 0); 1219 pt.pte_hi |= LPTE_LOCKED; 1220 1221 moea64_scratchpage_vpn[i] = (vsid << 16) | 1222 ((moea64_scratchpage_va[i] & ADDR_PIDX) >> 1223 ADDR_PIDX_SHFT); 1224 ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i], 0); 1225 pteidx = moea64_pte_insert(ptegidx, &pt); 1226 if (pt.pte_hi & LPTE_HID) 1227 ptegidx ^= moea64_pteg_mask; 1228 1229 moea64_scratchpage_pte[i] = 1230 &moea64_pteg_table[ptegidx].pt[pteidx]; 1231 1232 UNLOCK_TABLE(); 1233 } 1234 } 1235 1236 /* 1237 * Allocate a kernel stack with a guard page for thread0 and map it 1238 * into the kernel page map. 1239 */ 1240 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 1241 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1242 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 1243 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 1244 thread0.td_kstack = va; 1245 thread0.td_kstack_pages = KSTACK_PAGES; 1246 for (i = 0; i < KSTACK_PAGES; i++) { 1247 moea64_kenter(mmup, va, pa); 1248 pa += PAGE_SIZE; 1249 va += PAGE_SIZE; 1250 } 1251 1252 /* 1253 * Allocate virtual address space for the message buffer. 
1254 */ 1255 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1256 msgbufp = (struct msgbuf *)virtual_avail; 1257 va = virtual_avail; 1258 virtual_avail += round_page(MSGBUF_SIZE); 1259 while (va < virtual_avail) { 1260 moea64_kenter(mmup, va, pa); 1261 pa += PAGE_SIZE; 1262 va += PAGE_SIZE; 1263 } 1264 1265 /* 1266 * Allocate virtual address space for the dynamic percpu area. 1267 */ 1268 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1269 dpcpu = (void *)virtual_avail; 1270 va = virtual_avail; 1271 virtual_avail += DPCPU_SIZE; 1272 while (va < virtual_avail) { 1273 moea64_kenter(mmup, va, pa); 1274 pa += PAGE_SIZE; 1275 va += PAGE_SIZE; 1276 } 1277 dpcpu_init(dpcpu, 0); 1278} 1279 1280/* 1281 * Activate a user pmap. The pmap must be activated before its address 1282 * space can be accessed in any way. 1283 */ 1284void 1285moea64_activate(mmu_t mmu, struct thread *td) 1286{ 1287 pmap_t pm; 1288 1289 pm = &td->td_proc->p_vmspace->vm_pmap; 1290 pm->pm_active |= PCPU_GET(cpumask); 1291 1292 #ifdef __powerpc64__ 1293 PCPU_SET(userslb, pm->pm_slb); 1294 #else 1295 PCPU_SET(curpmap, pm->pmap_phys); 1296 #endif 1297} 1298 1299void 1300moea64_deactivate(mmu_t mmu, struct thread *td) 1301{ 1302 pmap_t pm; 1303 1304 pm = &td->td_proc->p_vmspace->vm_pmap; 1305 pm->pm_active &= ~(PCPU_GET(cpumask)); 1306 #ifdef __powerpc64__ 1307 PCPU_SET(userslb, NULL); 1308 #else 1309 PCPU_SET(curpmap, NULL); 1310 #endif 1311} 1312 1313void 1314moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1315{ 1316 struct pvo_entry *pvo; 1317 struct lpte *pt; 1318 uint64_t vsid; 1319 int i, ptegidx; 1320 1321 PMAP_LOCK(pm); 1322 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1323 1324 if (pvo != NULL) { 1325 LOCK_TABLE(); 1326 pt = moea64_pvo_to_pte(pvo); 1327 1328 if (wired) { 1329 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1330 pm->pm_stats.wired_count++; 1331 pvo->pvo_vaddr |= PVO_WIRED; 1332 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1333 } else { 1334 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1335 pm->pm_stats.wired_count--; 1336 pvo->pvo_vaddr &= ~PVO_WIRED; 1337 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1338 } 1339 1340 if (pt != NULL) { 1341 /* Update wiring flag in page table. */ 1342 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1343 pvo->pvo_vpn); 1344 } else if (wired) { 1345 /* 1346 * If we are wiring the page, and it wasn't in the 1347 * page table before, add it. 1348 */ 1349 vsid = PVO_VSID(pvo); 1350 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1351 pvo->pvo_vaddr & PVO_LARGE); 1352 1353 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 1354 if (i >= 0) { 1355 PVO_PTEGIDX_CLR(pvo); 1356 PVO_PTEGIDX_SET(pvo, i); 1357 } 1358 } 1359 1360 UNLOCK_TABLE(); 1361 } 1362 PMAP_UNLOCK(pm); 1363} 1364 1365/* 1366 * This goes through and sets the physical address of our 1367 * special scratch PTE to the PA we want to zero or copy. 
Because 1368 * of locking issues (this can get called in pvo_enter() by 1369 * the UMA allocator), we can't use most other utility functions here 1370 */ 1371 1372static __inline 1373void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1374 1375 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1376 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1377 1378 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1379 TLBIE(moea64_scratchpage_vpn[which]); 1380 1381 moea64_scratchpage_pte[which]->pte_lo &= 1382 ~(LPTE_WIMG | LPTE_RPGN); 1383 moea64_scratchpage_pte[which]->pte_lo |= 1384 moea64_calc_wimg(pa) | (uint64_t)pa; 1385 EIEIO(); 1386 1387 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1388 PTESYNC(); isync(); 1389} 1390 1391void 1392moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1393{ 1394 vm_offset_t dst; 1395 vm_offset_t src; 1396 1397 dst = VM_PAGE_TO_PHYS(mdst); 1398 src = VM_PAGE_TO_PHYS(msrc); 1399 1400 if (hw_direct_map) { 1401 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1402 } else { 1403 mtx_lock(&moea64_scratchpage_mtx); 1404 1405 moea64_set_scratchpage_pa(0,src); 1406 moea64_set_scratchpage_pa(1,dst); 1407 1408 kcopy((void *)moea64_scratchpage_va[0], 1409 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1410 1411 mtx_unlock(&moea64_scratchpage_mtx); 1412 } 1413} 1414 1415void 1416moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1417{ 1418 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1419 1420 if (!moea64_initialized) 1421 panic("moea64_zero_page: can't zero pa %#" PRIxPTR, pa); 1422 if (size + off > PAGE_SIZE) 1423 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1424 1425 if (hw_direct_map) { 1426 bzero((caddr_t)pa + off, size); 1427 } else { 1428 mtx_lock(&moea64_scratchpage_mtx); 1429 moea64_set_scratchpage_pa(0,pa); 1430 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1431 mtx_unlock(&moea64_scratchpage_mtx); 1432 } 1433} 1434 1435/* 1436 * Zero a page of physical memory by temporarily mapping it 1437 */ 1438void 1439moea64_zero_page(mmu_t mmu, vm_page_t m) 1440{ 1441 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1442 vm_offset_t va, off; 1443 1444 if (!moea64_initialized) 1445 panic("moea64_zero_page: can't zero pa %#zx", pa); 1446 1447 if (!hw_direct_map) { 1448 mtx_lock(&moea64_scratchpage_mtx); 1449 1450 moea64_set_scratchpage_pa(0,pa); 1451 va = moea64_scratchpage_va[0]; 1452 } else { 1453 va = pa; 1454 } 1455 1456 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1457 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1458 1459 if (!hw_direct_map) 1460 mtx_unlock(&moea64_scratchpage_mtx); 1461} 1462 1463void 1464moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1465{ 1466 1467 moea64_zero_page(mmu, m); 1468} 1469 1470/* 1471 * Map the given physical page at the specified virtual address in the 1472 * target pmap with the protection requested. If specified the page 1473 * will be wired down. 1474 */ 1475void 1476moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1477 vm_prot_t prot, boolean_t wired) 1478{ 1479 1480 vm_page_lock_queues(); 1481 PMAP_LOCK(pmap); 1482 moea64_enter_locked(pmap, va, m, prot, wired); 1483 vm_page_unlock_queues(); 1484 PMAP_UNLOCK(pmap); 1485} 1486 1487/* 1488 * Map the given physical page at the specified virtual address in the 1489 * target pmap with the protection requested. If specified the page 1490 * will be wired down. 1491 * 1492 * The page queues and pmap must be locked. 
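 *
 * A minimal usage sketch (the same pattern the wrappers in this file use;
 * nothing new is introduced here):
 *
 *	vm_page_lock_queues();
 *	PMAP_LOCK(pm);
 *	moea64_enter_locked(pm, va, m, prot, FALSE);
 *	vm_page_unlock_queues();
 *	PMAP_UNLOCK(pm);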
1493 */ 1494 1495static void 1496moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1497 boolean_t wired) 1498{ 1499 struct pvo_head *pvo_head; 1500 uma_zone_t zone; 1501 vm_page_t pg; 1502 uint64_t pte_lo; 1503 u_int pvo_flags; 1504 int error; 1505 1506 if (!moea64_initialized) { 1507 pvo_head = &moea64_pvo_kunmanaged; 1508 pg = NULL; 1509 zone = moea64_upvo_zone; 1510 pvo_flags = 0; 1511 } else { 1512 pvo_head = vm_page_to_pvoh(m); 1513 pg = m; 1514 zone = moea64_mpvo_zone; 1515 pvo_flags = PVO_MANAGED; 1516 } 1517 1518 if (pmap_bootstrapped) 1519 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1520 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1521 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1522 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1523 ("moea64_enter_locked: page %p is not busy", m)); 1524 1525 /* XXX change the pvo head for fake pages */ 1526 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1527 pvo_flags &= ~PVO_MANAGED; 1528 pvo_head = &moea64_pvo_kunmanaged; 1529 zone = moea64_upvo_zone; 1530 } 1531 1532 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1533 1534 if (prot & VM_PROT_WRITE) { 1535 pte_lo |= LPTE_BW; 1536 if (pmap_bootstrapped && 1537 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1538 vm_page_flag_set(m, PG_WRITEABLE); 1539 } else 1540 pte_lo |= LPTE_BR; 1541 1542 if (prot & VM_PROT_EXECUTE) 1543 pvo_flags |= VM_PROT_EXECUTE; 1544 1545 if (wired) 1546 pvo_flags |= PVO_WIRED; 1547 1548 if ((m->flags & PG_FICTITIOUS) != 0) 1549 pvo_flags |= PVO_FAKE; 1550 1551 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1552 pte_lo, pvo_flags); 1553 1554 /* 1555 * Flush the page from the instruction cache if this page is 1556 * mapped executable and cacheable. 1557 */ 1558 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1559 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1560 } 1561} 1562 1563static void 1564moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1565{ 1566 1567 /* 1568 * This is much trickier than on older systems because 1569 * we can't sync the icache on physical addresses directly 1570 * without a direct map. Instead we check a couple of cases 1571 * where the memory is already mapped in and, failing that, 1572 * use the same trick we use for page zeroing to create 1573 * a temporary mapping for this physical address. 1574 */ 1575 1576 if (!pmap_bootstrapped) { 1577 /* 1578 * If PMAP is not bootstrapped, we are likely to be 1579 * in real mode. 1580 */ 1581 __syncicache((void *)pa, sz); 1582 } else if (pmap == kernel_pmap) { 1583 __syncicache((void *)va, sz); 1584 } else if (hw_direct_map) { 1585 __syncicache((void *)pa, sz); 1586 } else { 1587 /* Use the scratch page to set up a temp mapping */ 1588 1589 mtx_lock(&moea64_scratchpage_mtx); 1590 1591 moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1592 __syncicache((void *)(moea64_scratchpage_va[1] + 1593 (va & ADDR_POFF)), sz); 1594 1595 mtx_unlock(&moea64_scratchpage_mtx); 1596 } 1597} 1598 1599/* 1600 * Maps a sequence of resident pages belonging to the same object. 1601 * The sequence begins with the given page m_start. This page is 1602 * mapped at the given virtual address start. Each subsequent page is 1603 * mapped at a virtual address that is offset from start by the same 1604 * amount as the page is offset from m_start within the object. 
The 1605 * last page in the sequence is the page with the largest offset from 1606 * m_start that can be mapped at a virtual address less than the given 1607 * virtual address end. Not every virtual page between start and end 1608 * is mapped; only those for which a resident page exists with the 1609 * corresponding offset from m_start are mapped. 1610 */ 1611void 1612moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1613 vm_page_t m_start, vm_prot_t prot) 1614{ 1615 vm_page_t m; 1616 vm_pindex_t diff, psize; 1617 1618 psize = atop(end - start); 1619 m = m_start; 1620 vm_page_lock_queues(); 1621 PMAP_LOCK(pm); 1622 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1623 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1624 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1625 m = TAILQ_NEXT(m, listq); 1626 } 1627 vm_page_unlock_queues(); 1628 PMAP_UNLOCK(pm); 1629} 1630 1631void 1632moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1633 vm_prot_t prot) 1634{ 1635 1636 vm_page_lock_queues(); 1637 PMAP_LOCK(pm); 1638 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1639 FALSE); 1640 vm_page_unlock_queues(); 1641 PMAP_UNLOCK(pm); 1642} 1643 1644vm_paddr_t 1645moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1646{ 1647 struct pvo_entry *pvo; 1648 vm_paddr_t pa; 1649 1650 PMAP_LOCK(pm); 1651 pvo = moea64_pvo_find_va(pm, va); 1652 if (pvo == NULL) 1653 pa = 0; 1654 else 1655 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1656 (va - PVO_VADDR(pvo)); 1657 PMAP_UNLOCK(pm); 1658 return (pa); 1659} 1660 1661/* 1662 * Atomically extract and hold the physical page with the given 1663 * pmap and virtual address pair if that mapping permits the given 1664 * protection. 1665 */ 1666vm_page_t 1667moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1668{ 1669 struct pvo_entry *pvo; 1670 vm_page_t m; 1671 vm_paddr_t pa; 1672 1673 m = NULL; 1674 pa = 0; 1675 PMAP_LOCK(pmap); 1676retry: 1677 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1678 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1679 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1680 (prot & VM_PROT_WRITE) == 0)) { 1681 if (vm_page_pa_tryrelock(pmap, 1682 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1683 goto retry; 1684 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1685 vm_page_hold(m); 1686 } 1687 PA_UNLOCK_COND(pa); 1688 PMAP_UNLOCK(pmap); 1689 return (m); 1690} 1691 1692static void * 1693moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1694{ 1695 /* 1696 * This entire routine is a horrible hack to avoid bothering kmem 1697 * for new KVA addresses. Because this can get called from inside 1698 * kmem allocation routines, calling kmem for a new address here 1699 * can lead to multiply locking non-recursive mutexes. 
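	 *
	 * Instead, the physical address of the freshly allocated page is
	 * reused directly as its kernel virtual address: the page is wired
	 * into kernel_pmap with a 1:1 mapping via moea64_pvo_enter() below,
	 * so no new KVA has to be carved out at all.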
1700 */ 1701 static vm_pindex_t color; 1702 vm_offset_t va; 1703 1704 vm_page_t m; 1705 int pflags, needed_lock; 1706 1707 *flags = UMA_SLAB_PRIV; 1708 needed_lock = !PMAP_LOCKED(kernel_pmap); 1709 1710 if (needed_lock) 1711 PMAP_LOCK(kernel_pmap); 1712 1713 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1714 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1715 else 1716 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1717 if (wait & M_ZERO) 1718 pflags |= VM_ALLOC_ZERO; 1719 1720 for (;;) { 1721 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1722 if (m == NULL) { 1723 if (wait & M_NOWAIT) 1724 return (NULL); 1725 VM_WAIT; 1726 } else 1727 break; 1728 } 1729 1730 va = VM_PAGE_TO_PHYS(m); 1731 1732 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1733 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1734 PVO_WIRED | PVO_BOOTSTRAP); 1735 1736 if (needed_lock) 1737 PMAP_UNLOCK(kernel_pmap); 1738 1739 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1740 bzero((void *)va, PAGE_SIZE); 1741 1742 return (void *)va; 1743} 1744 1745void 1746moea64_init(mmu_t mmu) 1747{ 1748 1749 CTR0(KTR_PMAP, "moea64_init"); 1750 1751 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1752 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1753 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1754 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1755 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1756 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1757 1758 if (!hw_direct_map) { 1759 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1760 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1761 } 1762 1763 moea64_initialized = TRUE; 1764} 1765 1766boolean_t 1767moea64_is_referenced(mmu_t mmu, vm_page_t m) 1768{ 1769 1770 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1771 ("moea64_is_referenced: page %p is not managed", m)); 1772 return (moea64_query_bit(m, PTE_REF)); 1773} 1774 1775boolean_t 1776moea64_is_modified(mmu_t mmu, vm_page_t m) 1777{ 1778 1779 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1780 ("moea64_is_modified: page %p is not managed", m)); 1781 1782 /* 1783 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1784 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1785 * is clear, no PTEs can have LPTE_CHG set. 1786 */ 1787 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1788 if ((m->oflags & VPO_BUSY) == 0 && 1789 (m->flags & PG_WRITEABLE) == 0) 1790 return (FALSE); 1791 return (moea64_query_bit(m, LPTE_CHG)); 1792} 1793 1794void 1795moea64_clear_reference(mmu_t mmu, vm_page_t m) 1796{ 1797 1798 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1799 ("moea64_clear_reference: page %p is not managed", m)); 1800 moea64_clear_bit(m, LPTE_REF); 1801} 1802 1803void 1804moea64_clear_modify(mmu_t mmu, vm_page_t m) 1805{ 1806 1807 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1808 ("moea64_clear_modify: page %p is not managed", m)); 1809 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1810 KASSERT((m->oflags & VPO_BUSY) == 0, 1811 ("moea64_clear_modify: page %p is busy", m)); 1812 1813 /* 1814 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG 1815 * set. If the object containing the page is locked and the page is 1816 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1817 */ 1818 if ((m->flags & PG_WRITEABLE) == 0) 1819 return; 1820 moea64_clear_bit(m, LPTE_CHG); 1821} 1822 1823/* 1824 * Clear the write and modified bits in each of the given page's mappings. 
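 *
 * (In effect this downgrades every mapping of the page to read-only
 * (LPTE_BR); any mapping that had LPTE_CHG set causes the page to be
 * marked dirty before PG_WRITEABLE is cleared.)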
1825 */ 1826void 1827moea64_remove_write(mmu_t mmu, vm_page_t m) 1828{ 1829 struct pvo_entry *pvo; 1830 struct lpte *pt; 1831 pmap_t pmap; 1832 uint64_t lo; 1833 1834 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1835 ("moea64_remove_write: page %p is not managed", m)); 1836 1837 /* 1838 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1839 * another thread while the object is locked. Thus, if PG_WRITEABLE 1840 * is clear, no page table entries need updating. 1841 */ 1842 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1843 if ((m->oflags & VPO_BUSY) == 0 && 1844 (m->flags & PG_WRITEABLE) == 0) 1845 return; 1846 vm_page_lock_queues(); 1847 lo = moea64_attr_fetch(m); 1848 SYNC(); 1849 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1850 pmap = pvo->pvo_pmap; 1851 PMAP_LOCK(pmap); 1852 LOCK_TABLE(); 1853 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1854 pt = moea64_pvo_to_pte(pvo); 1855 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1856 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1857 if (pt != NULL) { 1858 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1859 lo |= pvo->pvo_pte.lpte.pte_lo; 1860 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1861 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1862 pvo->pvo_vpn); 1863 if (pvo->pvo_pmap == kernel_pmap) 1864 isync(); 1865 } 1866 } 1867 UNLOCK_TABLE(); 1868 PMAP_UNLOCK(pmap); 1869 } 1870 if ((lo & LPTE_CHG) != 0) { 1871 moea64_attr_clear(m, LPTE_CHG); 1872 vm_page_dirty(m); 1873 } 1874 vm_page_flag_clear(m, PG_WRITEABLE); 1875 vm_page_unlock_queues(); 1876} 1877 1878/* 1879 * moea64_ts_referenced: 1880 * 1881 * Return a count of reference bits for a page, clearing those bits. 1882 * It is not necessary for every reference bit to be cleared, but it 1883 * is necessary that 0 only be returned when there are truly no 1884 * reference bits set. 1885 * 1886 * XXX: The exact number of bits to check and clear is a matter that 1887 * should be tested and standardized at some point in the future for 1888 * optimal aging of shared pages. 1889 */ 1890boolean_t 1891moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1892{ 1893 1894 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1895 ("moea64_ts_referenced: page %p is not managed", m)); 1896 return (moea64_clear_bit(m, LPTE_REF)); 1897} 1898 1899/* 1900 * Map a wired page into kernel virtual address space. 1901 */ 1902void 1903moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1904{ 1905 uint64_t pte_lo; 1906 int error; 1907 1908#if 0 1909 if (!pmap_bootstrapped) { 1910 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1911 panic("Trying to enter an address in KVA -- %#" 1912 PRIxPTR "!\n",pa); 1913 } 1914#endif 1915 1916 pte_lo = moea64_calc_wimg(pa); 1917 1918 PMAP_LOCK(kernel_pmap); 1919 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1920 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1921 PVO_WIRED | VM_PROT_EXECUTE); 1922 1923 if (error != 0 && error != ENOENT) 1924 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1925 pa, error); 1926 1927 /* 1928 * Flush the memory from the instruction cache. 1929 */ 1930 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1931 __syncicache((void *)va, PAGE_SIZE); 1932 } 1933 PMAP_UNLOCK(kernel_pmap); 1934} 1935 1936/* 1937 * Extract the physical page address associated with the given kernel virtual 1938 * address. 1939 */ 1940vm_offset_t 1941moea64_kextract(mmu_t mmu, vm_offset_t va) 1942{ 1943 struct pvo_entry *pvo; 1944 vm_paddr_t pa; 1945 1946 /* 1947 * Shortcut the direct-mapped case when applicable. 
We never put
1948	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1949	 */
1950	if (va < VM_MIN_KERNEL_ADDRESS)
1951		return (va);
1952
1953	PMAP_LOCK(kernel_pmap);
1954	pvo = moea64_pvo_find_va(kernel_pmap, va);
1955	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1956	    va));
1957	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo));
1958	PMAP_UNLOCK(kernel_pmap);
1959	return (pa);
1960}
1961
1962/*
1963 * Remove a wired page from kernel virtual address space.
1964 */
1965void
1966moea64_kremove(mmu_t mmu, vm_offset_t va)
1967{
1968	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1969}
1970
1971/*
1972 * Map a range of physical addresses into kernel virtual address space.
1973 *
1974 * The value passed in *virt is a suggested virtual address for the mapping.
1975 * Architectures which can support a direct-mapped physical to virtual region
1976 * can return the appropriate address within that region, leaving '*virt'
1977 * unchanged. We cannot and therefore do not; *virt is updated with the
1978 * first usable address after the mapped region.
1979 */
1980vm_offset_t
1981moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1982    vm_offset_t pa_end, int prot)
1983{
1984	vm_offset_t sva, va;
1985
1986	sva = *virt;
1987	va = sva;
1988	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1989		moea64_kenter(mmu, va, pa_start);
1990	*virt = va;
1991
1992	return (sva);
1993}
1994
1995/*
1996 * Returns true if the pmap's pv is one of the first
1997 * 16 pvs linked to from this page. This count may
1998 * be changed upwards or downwards in the future; it
1999 * is only necessary that true be returned for a small
2000 * subset of pmaps for proper page aging.
2001 */
2002boolean_t
2003moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2004{
2005	int loops;
2006	struct pvo_entry *pvo;
2007	boolean_t rv;
2008
2009	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2010	    ("moea64_page_exists_quick: page %p is not managed", m));
2011	loops = 0;
2012	rv = FALSE;
2013	vm_page_lock_queues();
2014	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2015		if (pvo->pvo_pmap == pmap) {
2016			rv = TRUE;
2017			break;
2018		}
2019		if (++loops >= 16)
2020			break;
2021	}
2022	vm_page_unlock_queues();
2023	return (rv);
2024}
2025
2026/*
2027 * Return the number of managed mappings to the given physical page
2028 * that are wired.
2029 */
2030int
2031moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
2032{
2033	struct pvo_entry *pvo;
2034	int count;
2035
2036	count = 0;
2037	if ((m->flags & PG_FICTITIOUS) != 0)
2038		return (count);
2039	vm_page_lock_queues();
2040	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2041		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
2042			count++;
2043	vm_page_unlock_queues();
2044	return (count);
2045}
2046
2047static uintptr_t moea64_vsidcontext;
2048
2049uintptr_t
2050moea64_get_unique_vsid(void) {
2051	u_int entropy;
2052	register_t hash;
2053	uint32_t mask;
2054	int i;
2055
2056	entropy = 0;
2057	__asm __volatile("mftb %0" : "=r"(entropy));
2058
2059	mtx_lock(&moea64_slb_mutex);
2060	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2061		u_int n;
2062
2063		/*
2064		 * Create a new value by multiplying by a prime and adding in
2065		 * entropy from the timebase register. This is to make the
2066		 * VSID more random so that the PT hash function collides
2067		 * less often. (Note that the prime causes gcc to do shifts
2068		 * instead of a multiply.)
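		 *
		 * Each word of moea64_vsid_bitmap tracks VSID_NBPW contexts;
		 * on a collision we fall back to the first clear bit in that
		 * word, and only move on to another bucket when the word is
		 * completely full.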
2069 */ 2070 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 2071 hash = moea64_vsidcontext & (NVSIDS - 1); 2072 if (hash == 0) /* 0 is special, avoid it */ 2073 continue; 2074 n = hash >> 5; 2075 mask = 1 << (hash & (VSID_NBPW - 1)); 2076 hash = (moea64_vsidcontext & VSID_HASHMASK); 2077 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 2078 /* anything free in this bucket? */ 2079 if (moea64_vsid_bitmap[n] == 0xffffffff) { 2080 entropy = (moea64_vsidcontext >> 20); 2081 continue; 2082 } 2083 i = ffs(~moea64_vsid_bitmap[n]) - 1; 2084 mask = 1 << i; 2085 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 2086 hash |= i; 2087 } 2088 KASSERT(!(moea64_vsid_bitmap[n] & mask), 2089 ("Allocating in-use VSID %#zx\n", hash)); 2090 moea64_vsid_bitmap[n] |= mask; 2091 mtx_unlock(&moea64_slb_mutex); 2092 return (hash); 2093 } 2094 2095 mtx_unlock(&moea64_slb_mutex); 2096 panic("%s: out of segments",__func__); 2097} 2098 2099#ifdef __powerpc64__ 2100void 2101moea64_pinit(mmu_t mmu, pmap_t pmap) 2102{ 2103 PMAP_LOCK_INIT(pmap); 2104 2105 SPLAY_INIT(&pmap->pm_slbtree); 2106 pmap->pm_slb = slb_alloc_user_cache(); 2107} 2108#else 2109void 2110moea64_pinit(mmu_t mmu, pmap_t pmap) 2111{ 2112 int i; 2113 uint32_t hash; 2114 2115 PMAP_LOCK_INIT(pmap); 2116 2117 if (pmap_bootstrapped) 2118 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 2119 (vm_offset_t)pmap); 2120 else 2121 pmap->pmap_phys = pmap; 2122 2123 /* 2124 * Allocate some segment registers for this pmap. 2125 */ 2126 hash = moea64_get_unique_vsid(); 2127 2128 for (i = 0; i < 16; i++) 2129 pmap->pm_sr[i] = VSID_MAKE(i, hash); 2130 2131 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 2132} 2133#endif 2134 2135/* 2136 * Initialize the pmap associated with process 0. 2137 */ 2138void 2139moea64_pinit0(mmu_t mmu, pmap_t pm) 2140{ 2141 moea64_pinit(mmu, pm); 2142 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 2143} 2144 2145/* 2146 * Set the physical protection on the specified range of this map as requested. 2147 */ 2148void 2149moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 2150 vm_prot_t prot) 2151{ 2152 struct pvo_entry *pvo; 2153 struct lpte *pt; 2154 2155 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 2156 eva, prot); 2157 2158 2159 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 2160 ("moea64_protect: non current pmap")); 2161 2162 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2163 moea64_remove(mmu, pm, sva, eva); 2164 return; 2165 } 2166 2167 vm_page_lock_queues(); 2168 PMAP_LOCK(pm); 2169 for (; sva < eva; sva += PAGE_SIZE) { 2170 pvo = moea64_pvo_find_va(pm, sva); 2171 if (pvo == NULL) 2172 continue; 2173 2174 /* 2175 * Grab the PTE pointer before we diddle with the cached PTE 2176 * copy. 2177 */ 2178 LOCK_TABLE(); 2179 pt = moea64_pvo_to_pte(pvo); 2180 2181 /* 2182 * Change the protection of the page. 2183 */ 2184 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 2185 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 2186 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 2187 if ((prot & VM_PROT_EXECUTE) == 0) 2188 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 2189 2190 /* 2191 * If the PVO is in the page table, update that pte as well. 
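 * moea64_pte_change() re-installs the entry with the new protection, and
 * mappings that remain executable and cacheable also have the instruction
 * cache resynchronized for the page.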
2192 */ 2193 if (pt != NULL) { 2194 moea64_pte_change(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2195 if ((pvo->pvo_pte.lpte.pte_lo & 2196 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 2197 moea64_syncicache(pm, sva, 2198 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 2199 PAGE_SIZE); 2200 } 2201 } 2202 UNLOCK_TABLE(); 2203 } 2204 vm_page_unlock_queues(); 2205 PMAP_UNLOCK(pm); 2206} 2207 2208/* 2209 * Map a list of wired pages into kernel virtual address space. This is 2210 * intended for temporary mappings which do not need page modification or 2211 * references recorded. Existing mappings in the region are overwritten. 2212 */ 2213void 2214moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2215{ 2216 while (count-- > 0) { 2217 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2218 va += PAGE_SIZE; 2219 m++; 2220 } 2221} 2222 2223/* 2224 * Remove page mappings from kernel virtual address space. Intended for 2225 * temporary mappings entered by moea64_qenter. 2226 */ 2227void 2228moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2229{ 2230 while (count-- > 0) { 2231 moea64_kremove(mmu, va); 2232 va += PAGE_SIZE; 2233 } 2234} 2235 2236void 2237moea64_release_vsid(uint64_t vsid) 2238{ 2239 int idx, mask; 2240 2241 mtx_lock(&moea64_slb_mutex); 2242 idx = vsid & (NVSIDS-1); 2243 mask = 1 << (idx % VSID_NBPW); 2244 idx /= VSID_NBPW; 2245 KASSERT(moea64_vsid_bitmap[idx] & mask, 2246 ("Freeing unallocated VSID %#jx", vsid)); 2247 moea64_vsid_bitmap[idx] &= ~mask; 2248 mtx_unlock(&moea64_slb_mutex); 2249} 2250 2251 2252void 2253moea64_release(mmu_t mmu, pmap_t pmap) 2254{ 2255 2256 /* 2257 * Free segment registers' VSIDs 2258 */ 2259 #ifdef __powerpc64__ 2260 free_vsids(pmap); 2261 slb_free_user_cache(pmap->pm_slb); 2262 #else 2263 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2264 2265 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2266 #endif 2267 2268 PMAP_LOCK_DESTROY(pmap); 2269} 2270 2271/* 2272 * Remove the given range of addresses from the specified map. 2273 */ 2274void 2275moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2276{ 2277 struct pvo_entry *pvo; 2278 2279 vm_page_lock_queues(); 2280 PMAP_LOCK(pm); 2281 for (; sva < eva; sva += PAGE_SIZE) { 2282 pvo = moea64_pvo_find_va(pm, sva); 2283 if (pvo != NULL) 2284 moea64_pvo_remove(pvo); 2285 } 2286 vm_page_unlock_queues(); 2287 PMAP_UNLOCK(pm); 2288} 2289 2290/* 2291 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2292 * will reflect changes in pte's back to the vm_page. 2293 */ 2294void 2295moea64_remove_all(mmu_t mmu, vm_page_t m) 2296{ 2297 struct pvo_head *pvo_head; 2298 struct pvo_entry *pvo, *next_pvo; 2299 pmap_t pmap; 2300 2301 vm_page_lock_queues(); 2302 pvo_head = vm_page_to_pvoh(m); 2303 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2304 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2305 2306 MOEA_PVO_CHECK(pvo); /* sanity check */ 2307 pmap = pvo->pvo_pmap; 2308 PMAP_LOCK(pmap); 2309 moea64_pvo_remove(pvo); 2310 PMAP_UNLOCK(pmap); 2311 } 2312 if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 2313 moea64_attr_clear(m, LPTE_CHG); 2314 vm_page_dirty(m); 2315 } 2316 vm_page_flag_clear(m, PG_WRITEABLE); 2317 vm_page_unlock_queues(); 2318} 2319 2320/* 2321 * Allocate a physical page of memory directly from the phys_avail map. 2322 * Can only be called from moea64_bootstrap before avail start and end are 2323 * calculated. 
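 * The request is satisfied from the first phys_avail region that can hold
 * the (page-rounded, optionally aligned) allocation; carving it out of the
 * middle of a region splits that region in two.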
2324 */ 2325static vm_offset_t 2326moea64_bootstrap_alloc(vm_size_t size, u_int align) 2327{ 2328 vm_offset_t s, e; 2329 int i, j; 2330 2331 size = round_page(size); 2332 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2333 if (align != 0) 2334 s = (phys_avail[i] + align - 1) & ~(align - 1); 2335 else 2336 s = phys_avail[i]; 2337 e = s + size; 2338 2339 if (s < phys_avail[i] || e > phys_avail[i + 1]) 2340 continue; 2341 2342 if (s == phys_avail[i]) { 2343 phys_avail[i] += size; 2344 } else if (e == phys_avail[i + 1]) { 2345 phys_avail[i + 1] -= size; 2346 } else { 2347 for (j = phys_avail_count * 2; j > i; j -= 2) { 2348 phys_avail[j] = phys_avail[j - 2]; 2349 phys_avail[j + 1] = phys_avail[j - 1]; 2350 } 2351 2352 phys_avail[i + 3] = phys_avail[i + 1]; 2353 phys_avail[i + 1] = s; 2354 phys_avail[i + 2] = e; 2355 phys_avail_count++; 2356 } 2357 2358 return (s); 2359 } 2360 panic("moea64_bootstrap_alloc: could not allocate memory"); 2361} 2362 2363static void 2364tlbia(void) 2365{ 2366 vm_offset_t i; 2367 #ifndef __powerpc64__ 2368 register_t msr, scratch; 2369 #endif 2370 2371 TLBSYNC(); 2372 2373 for (i = 0; i < 0xFF000; i += 0x00001000) { 2374 #ifdef __powerpc64__ 2375 __asm __volatile("tlbiel %0" :: "r"(i)); 2376 #else 2377 __asm __volatile("\ 2378 mfmsr %0; \ 2379 mr %1, %0; \ 2380 insrdi %1,%3,1,0; \ 2381 mtmsrd %1; \ 2382 isync; \ 2383 \ 2384 tlbiel %2; \ 2385 \ 2386 mtmsrd %0; \ 2387 isync;" 2388 : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 2389 #endif 2390 } 2391 2392 EIEIO(); 2393 TLBSYNC(); 2394} 2395 2396#ifdef __powerpc64__ 2397static void 2398slbia(void) 2399{ 2400 register_t seg0; 2401 2402 __asm __volatile ("slbia"); 2403 __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0)); 2404} 2405#endif 2406 2407static int 2408moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 2409 vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags) 2410{ 2411 struct pvo_entry *pvo; 2412 uint64_t vsid; 2413 int first; 2414 u_int ptegidx; 2415 int i; 2416 int bootstrap; 2417 2418 /* 2419 * One nasty thing that can happen here is that the UMA calls to 2420 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2421 * which calls UMA... 2422 * 2423 * We break the loop by detecting recursion and allocating out of 2424 * the bootstrap pool. 2425 */ 2426 2427 moea64_pvo_enter_calls++; 2428 first = 0; 2429 bootstrap = (flags & PVO_BOOTSTRAP); 2430 2431 if (!moea64_initialized) 2432 bootstrap = 1; 2433 2434 /* 2435 * Compute the PTE Group index. 2436 */ 2437 va &= ~ADDR_POFF; 2438 vsid = va_to_vsid(pm, va); 2439 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2440 2441 /* 2442 * Remove any existing mapping for this page. Reuse the pvo entry if 2443 * there is a mapping. 2444 */ 2445 LOCK_TABLE(); 2446 2447 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2448 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2449 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2450 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == 2451 (pte_lo & LPTE_PP)) { 2452 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) { 2453 /* Re-insert if spilled */ 2454 i = moea64_pte_insert(ptegidx, 2455 &pvo->pvo_pte.lpte); 2456 if (i >= 0) 2457 PVO_PTEGIDX_SET(pvo, i); 2458 moea64_pte_overflow--; 2459 } 2460 UNLOCK_TABLE(); 2461 return (0); 2462 } 2463 moea64_pvo_remove(pvo); 2464 break; 2465 } 2466 } 2467 2468 /* 2469 * If we aren't overwriting a mapping, try to allocate. 
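 * PVOs needed before the VM system is up (or explicitly flagged
 * PVO_BOOTSTRAP) come from the static bpvo pool; everything else is
 * allocated M_NOWAIT from the caller's UMA zone, with the table lock
 * dropped around the allocation to avoid recursing on it.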
2470 */ 2471 if (bootstrap) { 2472 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2473 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 2474 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2475 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2476 } 2477 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2478 moea64_bpvo_pool_index++; 2479 bootstrap = 1; 2480 } else { 2481 /* 2482 * Note: drop the table lock around the UMA allocation in 2483 * case the UMA allocator needs to manipulate the page 2484 * table. The mapping we are working with is already 2485 * protected by the PMAP lock. 2486 */ 2487 UNLOCK_TABLE(); 2488 pvo = uma_zalloc(zone, M_NOWAIT); 2489 LOCK_TABLE(); 2490 } 2491 2492 if (pvo == NULL) { 2493 UNLOCK_TABLE(); 2494 return (ENOMEM); 2495 } 2496 2497 moea64_pvo_entries++; 2498 pvo->pvo_vaddr = va; 2499 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2500 | (vsid << 16); 2501 pvo->pvo_pmap = pm; 2502 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2503 pvo->pvo_vaddr &= ~ADDR_POFF; 2504 2505 if (!(flags & VM_PROT_EXECUTE)) 2506 pte_lo |= LPTE_NOEXEC; 2507 if (flags & PVO_WIRED) 2508 pvo->pvo_vaddr |= PVO_WIRED; 2509 if (pvo_head != &moea64_pvo_kunmanaged) 2510 pvo->pvo_vaddr |= PVO_MANAGED; 2511 if (bootstrap) 2512 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2513 if (flags & PVO_FAKE) 2514 pvo->pvo_vaddr |= PVO_FAKE; 2515 if (flags & PVO_LARGE) 2516 pvo->pvo_vaddr |= PVO_LARGE; 2517 2518 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2519 (uint64_t)(pa) | pte_lo, flags); 2520 2521 /* 2522 * Remember if the list was empty and therefore will be the first 2523 * item. 2524 */ 2525 if (LIST_FIRST(pvo_head) == NULL) 2526 first = 1; 2527 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2528 2529 if (pvo->pvo_vaddr & PVO_WIRED) { 2530 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2531 pm->pm_stats.wired_count++; 2532 } 2533 pm->pm_stats.resident_count++; 2534 2535 /* 2536 * We hope this succeeds but it isn't required. 2537 */ 2538 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 2539 if (i >= 0) { 2540 PVO_PTEGIDX_SET(pvo, i); 2541 } else { 2542 panic("moea64_pvo_enter: overflow"); 2543 moea64_pte_overflow++; 2544 } 2545 2546 if (pm == kernel_pmap) 2547 isync(); 2548 2549 UNLOCK_TABLE(); 2550 2551#ifdef __powerpc64__ 2552 /* 2553 * Make sure all our bootstrap mappings are in the SLB as soon 2554 * as virtual memory is switched on. 2555 */ 2556 if (!pmap_bootstrapped) 2557 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2558#endif 2559 2560 return (first ? ENOENT : 0); 2561} 2562 2563static void 2564moea64_pvo_remove(struct pvo_entry *pvo) 2565{ 2566 struct lpte *pt; 2567 2568 /* 2569 * If there is an active pte entry, we need to deactivate it (and 2570 * save the ref & cfg bits). 2571 */ 2572 LOCK_TABLE(); 2573 pt = moea64_pvo_to_pte(pvo); 2574 if (pt != NULL) { 2575 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2576 PVO_PTEGIDX_CLR(pvo); 2577 } else { 2578 moea64_pte_overflow--; 2579 } 2580 2581 /* 2582 * Update our statistics. 2583 */ 2584 pvo->pvo_pmap->pm_stats.resident_count--; 2585 if (pvo->pvo_vaddr & PVO_WIRED) 2586 pvo->pvo_pmap->pm_stats.wired_count--; 2587 2588 /* 2589 * Save the REF/CHG bits into their cache if the page is managed. 
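 * The cached attributes on the vm_page let moea64_query_bit() and friends
 * answer later referenced/modified queries without having to probe the
 * hardware page table.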
2590 */ 2591 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2592 struct vm_page *pg; 2593 2594 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2595 if (pg != NULL) { 2596 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo & 2597 (LPTE_REF | LPTE_CHG)); 2598 } 2599 } 2600 2601 /* 2602 * Remove this PVO from the PV list. 2603 */ 2604 LIST_REMOVE(pvo, pvo_vlink); 2605 2606 /* 2607 * Remove this from the overflow list and return it to the pool 2608 * if we aren't going to reuse it. 2609 */ 2610 LIST_REMOVE(pvo, pvo_olink); 2611 UNLOCK_TABLE(); 2612 2613 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2614 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone : 2615 moea64_upvo_zone, pvo); 2616 2617 moea64_pvo_entries--; 2618 moea64_pvo_remove_calls++; 2619} 2620 2621static struct pvo_entry * 2622moea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2623{ 2624 struct pvo_entry *pvo; 2625 int ptegidx; 2626 uint64_t vsid; 2627 #ifdef __powerpc64__ 2628 struct slb slb; 2629 2630 /* The page is not mapped if the segment isn't */ 2631 if (va_to_slb_entry(pm, va, &slb) != 0) 2632 return NULL; 2633 2634 vsid = (slb.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT; 2635 if (slb.slbv & SLBV_L) 2636 va &= ~moea64_large_page_mask; 2637 else 2638 va &= ~ADDR_POFF; 2639 ptegidx = va_to_pteg(vsid, va, slb.slbv & SLBV_L); 2640 #else 2641 va &= ~ADDR_POFF; 2642 vsid = va_to_vsid(pm, va); 2643 ptegidx = va_to_pteg(vsid, va, 0); 2644 #endif 2645 2646 LOCK_TABLE(); 2647 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2648 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) 2649 break; 2650 } 2651 UNLOCK_TABLE(); 2652 2653 return (pvo); 2654} 2655 2656static struct lpte * 2657moea64_pvo_to_pte(const struct pvo_entry *pvo) 2658{ 2659 struct lpte *pt; 2660 int pteidx, ptegidx; 2661 uint64_t vsid; 2662 2663 ASSERT_TABLE_LOCK(); 2664 2665 /* If the PTEG index is not set, then there is no page table entry */ 2666 if (!PVO_PTEGIDX_ISSET(pvo)) 2667 return (NULL); 2668 2669 /* 2670 * Calculate the ptegidx 2671 */ 2672 vsid = PVO_VSID(pvo); 2673 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 2674 pvo->pvo_vaddr & PVO_LARGE); 2675 2676 /* 2677 * We can find the actual pte entry without searching by grabbing 2678 * the PTEG index from 3 unused bits in pvo_vaddr and by 2679 * noticing the HID bit. 
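 * If LPTE_HID is set, the entry was installed via the secondary hash, so
 * the PTEG index has to be flipped with moea64_pteg_mask before the slot
 * number recorded in the PVO is appended to it.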
2680 */ 2681 if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID) 2682 ptegidx ^= moea64_pteg_mask; 2683 2684 pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo); 2685 2686 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 2687 !PVO_PTEGIDX_ISSET(pvo)) { 2688 panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no " 2689 "valid pte index", pvo); 2690 } 2691 2692 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 && 2693 PVO_PTEGIDX_ISSET(pvo)) { 2694 panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo " 2695 "pvo but no valid pte", pvo); 2696 } 2697 2698 pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2699 if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) == 2700 LPTE_VALID) { 2701 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) { 2702 panic("moea64_pvo_to_pte: pvo %p has valid pte in " 2703 "moea64_pteg_table %p but invalid in pvo", pvo, pt); 2704 } 2705 2706 if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) & 2707 ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) { 2708 panic("moea64_pvo_to_pte: pvo %p pte does not match " 2709 "pte %p in moea64_pteg_table difference is %#x", 2710 pvo, pt, 2711 (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo)); 2712 } 2713 2714 return (pt); 2715 } 2716 2717 if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) { 2718 panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in " 2719 "moea64_pteg_table but valid in pvo", pvo, pt); 2720 } 2721 2722 return (NULL); 2723} 2724 2725static __inline int 2726moea64_pte_spillable_ident(u_int ptegidx) 2727{ 2728 struct lpte *pt; 2729 int i, j, k; 2730 2731 /* Start at a random slot */ 2732 i = mftb() % 8; 2733 k = -1; 2734 for (j = 0; j < 8; j++) { 2735 pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8]; 2736 if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED)) 2737 continue; 2738 2739 /* This is a candidate, so remember it */ 2740 k = (i + j) % 8; 2741 2742 /* Try to get a page that has not been used lately */ 2743 if (!(pt->pte_lo & LPTE_REF)) 2744 return (k); 2745 } 2746 2747 return (k); 2748} 2749 2750static int 2751moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt) 2752{ 2753 struct lpte *pt; 2754 struct pvo_entry *pvo; 2755 u_int pteg_bktidx; 2756 int i; 2757 2758 ASSERT_TABLE_LOCK(); 2759 2760 /* 2761 * First try primary hash. 2762 */ 2763 pteg_bktidx = ptegidx; 2764 for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) { 2765 if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) { 2766 pvo_pt->pte_hi &= ~LPTE_HID; 2767 moea64_pte_set(pt, pvo_pt); 2768 return (i); 2769 } 2770 } 2771 2772 /* 2773 * Now try secondary hash. 2774 */ 2775 pteg_bktidx ^= moea64_pteg_mask; 2776 for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) { 2777 if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) { 2778 pvo_pt->pte_hi |= LPTE_HID; 2779 moea64_pte_set(pt, pvo_pt); 2780 return (i); 2781 } 2782 } 2783 2784 /* 2785 * Out of luck. Find a PTE to sacrifice. 2786 */ 2787 pteg_bktidx = ptegidx; 2788 i = moea64_pte_spillable_ident(pteg_bktidx); 2789 if (i < 0) { 2790 pteg_bktidx ^= moea64_pteg_mask; 2791 i = moea64_pte_spillable_ident(pteg_bktidx); 2792 } 2793 2794 if (i < 0) { 2795 /* No freeable slots in either PTEG? We're hosed. */ 2796 panic("moea64_pte_insert: overflow"); 2797 return (-1); 2798 } 2799 2800 if (pteg_bktidx == ptegidx) 2801 pvo_pt->pte_hi &= ~LPTE_HID; 2802 else 2803 pvo_pt->pte_hi |= LPTE_HID; 2804 2805 /* 2806 * Synchronize the sacrifice PTE with its PVO, then mark both 2807 * invalid. The PVO will be reused when/if the VM system comes 2808 * here after a fault. 
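 * The victim's owning PVO is located by matching pte_hi against the PVO
 * overflow list for the bucket (indexed by the primary hash, even if the
 * victim itself sits in the secondary PTEG).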
2809 */ 2810 pt = &moea64_pteg_table[pteg_bktidx].pt[i]; 2811 2812 if (pt->pte_hi & LPTE_HID) 2813 pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */ 2814 2815 LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) { 2816 if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) { 2817 KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID, 2818 ("Invalid PVO for valid PTE!")); 2819 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2820 PVO_PTEGIDX_CLR(pvo); 2821 moea64_pte_overflow++; 2822 break; 2823 } 2824 } 2825 2826 KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi, 2827 ("Unable to find PVO for spilled PTE")); 2828 2829 /* 2830 * Set the new PTE. 2831 */ 2832 moea64_pte_set(pt, pvo_pt); 2833 2834 return (i); 2835} 2836 2837static boolean_t 2838moea64_query_bit(vm_page_t m, u_int64_t ptebit) 2839{ 2840 struct pvo_entry *pvo; 2841 struct lpte *pt; 2842 2843 if (moea64_attr_fetch(m) & ptebit) 2844 return (TRUE); 2845 2846 vm_page_lock_queues(); 2847 2848 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2849 MOEA_PVO_CHECK(pvo); /* sanity check */ 2850 2851 /* 2852 * See if we saved the bit off. If so, cache it and return 2853 * success. 2854 */ 2855 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2856 moea64_attr_save(m, ptebit); 2857 MOEA_PVO_CHECK(pvo); /* sanity check */ 2858 vm_page_unlock_queues(); 2859 return (TRUE); 2860 } 2861 } 2862 2863 /* 2864 * No luck, now go through the hard part of looking at the PTEs 2865 * themselves. Sync so that any pending REF/CHG bits are flushed to 2866 * the PTEs. 2867 */ 2868 SYNC(); 2869 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2870 MOEA_PVO_CHECK(pvo); /* sanity check */ 2871 2872 /* 2873 * See if this pvo has a valid PTE. if so, fetch the 2874 * REF/CHG bits from the valid PTE. If the appropriate 2875 * ptebit is set, cache it and return success. 2876 */ 2877 LOCK_TABLE(); 2878 pt = moea64_pvo_to_pte(pvo); 2879 if (pt != NULL) { 2880 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2881 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2882 UNLOCK_TABLE(); 2883 2884 moea64_attr_save(m, ptebit); 2885 MOEA_PVO_CHECK(pvo); /* sanity check */ 2886 vm_page_unlock_queues(); 2887 return (TRUE); 2888 } 2889 } 2890 UNLOCK_TABLE(); 2891 } 2892 2893 vm_page_unlock_queues(); 2894 return (FALSE); 2895} 2896 2897static u_int 2898moea64_clear_bit(vm_page_t m, u_int64_t ptebit) 2899{ 2900 u_int count; 2901 struct pvo_entry *pvo; 2902 struct lpte *pt; 2903 2904 vm_page_lock_queues(); 2905 2906 /* 2907 * Clear the cached value. 2908 */ 2909 moea64_attr_clear(m, ptebit); 2910 2911 /* 2912 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2913 * we can reset the right ones). note that since the pvo entries and 2914 * list heads are accessed via BAT0 and are never placed in the page 2915 * table, we don't have to worry about further accesses setting the 2916 * REF/CHG bits. 2917 */ 2918 SYNC(); 2919 2920 /* 2921 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2922 * valid pte clear the ptebit from the valid pte. 
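 * The returned count is the number of valid PTEs that actually had the
 * bit set at the time it was cleared.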
2923 */ 2924 count = 0; 2925 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2926 MOEA_PVO_CHECK(pvo); /* sanity check */ 2927 2928 LOCK_TABLE(); 2929 pt = moea64_pvo_to_pte(pvo); 2930 if (pt != NULL) { 2931 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2932 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2933 count++; 2934 moea64_pte_clear(pt, pvo->pvo_vpn, ptebit); 2935 } 2936 } 2937 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2938 MOEA_PVO_CHECK(pvo); /* sanity check */ 2939 UNLOCK_TABLE(); 2940 } 2941 2942 vm_page_unlock_queues(); 2943 return (count); 2944} 2945 2946boolean_t 2947moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2948{ 2949 struct pvo_entry *pvo; 2950 vm_offset_t ppa; 2951 int error = 0; 2952 2953 PMAP_LOCK(kernel_pmap); 2954 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2955 pvo = moea64_pvo_find_va(kernel_pmap, ppa); 2956 if (pvo == NULL || 2957 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2958 error = EFAULT; 2959 break; 2960 } 2961 } 2962 PMAP_UNLOCK(kernel_pmap); 2963 2964 return (error); 2965} 2966 2967/* 2968 * Map a set of physical memory pages into the kernel virtual 2969 * address space. Return a pointer to where it is mapped. This 2970 * routine is intended to be used for mapping device memory, 2971 * NOT real memory. 2972 */ 2973void * 2974moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2975{ 2976 vm_offset_t va, tmpva, ppa, offset; 2977 2978 ppa = trunc_page(pa); 2979 offset = pa & PAGE_MASK; 2980 size = roundup(offset + size, PAGE_SIZE); 2981 2982 va = kmem_alloc_nofault(kernel_map, size); 2983 2984 if (!va) 2985 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2986 2987 for (tmpva = va; size > 0;) { 2988 moea64_kenter(mmu, tmpva, ppa); 2989 size -= PAGE_SIZE; 2990 tmpva += PAGE_SIZE; 2991 ppa += PAGE_SIZE; 2992 } 2993 2994 return ((void *)(va + offset)); 2995} 2996 2997void 2998moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2999{ 3000 vm_offset_t base, offset; 3001 3002 base = trunc_page(va); 3003 offset = va & PAGE_MASK; 3004 size = roundup(offset + size, PAGE_SIZE); 3005 3006 kmem_free(kernel_map, base, size); 3007} 3008 3009static void 3010moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 3011{ 3012 struct pvo_entry *pvo; 3013 vm_offset_t lim; 3014 vm_paddr_t pa; 3015 vm_size_t len; 3016 3017 PMAP_LOCK(pm); 3018 while (sz > 0) { 3019 lim = round_page(va); 3020 len = MIN(lim - va, sz); 3021 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 3022 if (pvo != NULL) { 3023 pa = (pvo->pvo_pte.pte.pte_lo & LPTE_RPGN) | 3024 (va & ADDR_POFF); 3025 moea64_syncicache(pm, va, pa, len); 3026 } 3027 va += len; 3028 sz -= len; 3029 } 3030 PMAP_UNLOCK(pm); 3031} 3032