/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/mmu_oea64.c 263687 2014-03-24 13:48:04Z emaste $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */
#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 * -- Read lock: if no modifications are being made to either the PVO lists
 *    or page table or if any modifications being made result in internal
 *    changes (e.g. wiring, protection) such that the existence of the PVOs
 *    is unchanged and they remain associated with the same pmap (in which
 *    case the changes should be protected by the pmap lock)
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD()		rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD()	rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR()		rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR()	rw_wunlock(&moea64_table_lock)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern int dumpsys_minidump;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock	moea64_table_lock;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;
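/*
 * moea64_pteg_count is kept a power of two by the default sizing in
 * moea64_early_bootstrap(), so masking a hash with moea64_pteg_mask is
 * equivalent to reducing it modulo the PTEG count (e.g. 4096 PTEGs give
 * a mask of 0xfff); a custom PTEGCOUNT must preserve this property.
 */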
/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;
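/*
 * The two scratchpage slots above name kernel VAs whose PTEs are
 * rewritten on the fly (see moea64_set_scratchpage_pa()) so that
 * arbitrary physical pages can be zeroed, copied, or icache-synced on
 * systems without a direct map.
 */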
/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz);
struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_md,		moea64_scan_md),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
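/*
 * The table above wires this pmap into the machine-independent mmu_if
 * kobj interface; MMU_DEF() registers it as "mmu_oea64_base" so that
 * hardware-specific implementations can layer on top of it, supplying
 * the low-level MOEA64_PTE_* operations declared in moea64_if.h.
 */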
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}
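/*
 * Worked example for va_to_pteg(): for a regular 4 KB page, the page
 * index bits of the address (ADDR_PIDX, shifted down by ADDR_PIDX_SHFT)
 * are XORed with the low bits of the VSID and the result is folded into
 * the table with moea64_pteg_mask.  Large pages use
 * moea64_large_page_shift instead, so every address within one large
 * page hashes to the same PTEG.
 */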
static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return (pte_lo);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	#ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	#else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	#endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif
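/*
 * Note on moea64_probe_large_page() above: 970-class CPUs come up with
 * large pages disabled via HID4, so the probe clears
 * HID4_970_DISABLE_LG_PG before reporting the 16 MB large-page geometry
 * that the direct map relies on.
 */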
static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
			    NULL, pa, pa, pte_lo,
			    PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}
void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}
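/*
 * The default PTEG sizing above selects the largest power of two
 * strictly below the physical page count (with a floor of 0x800),
 * i.e. roughly one eight-slot PTEG for every one to two pages of RAM.
 */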
void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialize the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}
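/*
 * Bootstrap runs in three phases: moea64_early_bootstrap() sizes
 * physical memory and the PTEG table, moea64_mid_bootstrap() above
 * allocates the PVO structures and initializes the kernel pmap, and
 * moea64_late_bootstrap() below enables translation and maps the
 * remaining kernel data structures (thread0's stack, the message
 * buffer, and the per-CPU area).
 */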
void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to keep the PVO
	 * book-keeping and other parts of the VM system from even
	 * knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE_RD();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE_RD();
		}
	}
}
/*
 * Activate a user pmap. The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}
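/*
 * Convention used above and throughout this file: MOEA64_PVO_TO_PTE()
 * returns -1 when a PVO currently has no slot in the hardware page
 * table, so callers test for -1 before updating the PTE in place.
 */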
/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here.
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}
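/*
 * The copy paths above are why two scratchpage slots exist: with no
 * direct map, slot 0 temporarily maps the source page and slot 1 the
 * destination, both under moea64_scratchpage_mtx.
 */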
void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = NULL;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = NULL;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	LOCK_TABLE_WR();
	PMAP_LOCK(pmap);
	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
	PMAP_UNLOCK(pmap);
	UNLOCK_TABLE_WR();

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
}
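/*
 * PGA_EXECUTABLE records that a page has been mapped executable and
 * cacheable at least once, so the icache flush in moea64_enter() above
 * runs only for the first such mapping of each page rather than on
 * every call.
 */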
static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map. Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	moea64_enter(mmu, pm, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}
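/*
 * Note that moea64_extract() returns 0 both when no mapping exists and
 * when the address genuinely maps physical page 0; callers that must
 * distinguish the two cases should use moea64_extract_and_hold() below.
 */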
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	LOCK_TABLE_WR();
	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}
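/*
 * moea64_uma_page_alloc() above sidesteps KVA allocation entirely: the
 * freshly allocated page is entered at a virtual address equal to its
 * physical address, so the kernel pmap gains a 1:1 mapping and no call
 * into kmem is needed while its locks may already be held.
 */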
extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, LPTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have LPTE_CHG set.
	 */
	VM_OBJECT_ASSERT_LOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("moea64_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	powerpc_sync();
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != -1) {
				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				MOEA64_PTE_CHANGE(mmu, pt,
				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		if ((lo & LPTE_CHG) != 0)
			vm_page_dirty(m);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(mmu, m, LPTE_REF));
}
/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != -1) {
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	m->md.mdpg_cache_attrs = ma;
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	LOCK_TABLE_WR();
	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, pa, pte_lo, PVO_WIRED);
	PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
		    pa, error);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE_RD();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	UNLOCK_TABLE_RD();
	return (count);
}

static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments",__func__);
}
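/*
 * Bitmap bookkeeping in moea64_get_unique_vsid() above: with
 * VSID_NBPW == 32, a candidate hash of 0x123 lands in bitmap word
 * 0x123 >> 5 == 9 under mask 1 << (0x123 & 31) == 1 << 3; on a
 * collision, the first clear bit of that word is substituted into the
 * low bits of the hash instead.
 */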

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{

	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	uintptr_t pt;
	struct	vm_page *pg;
	uint64_t oldlo;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/*
	 * Grab the PTE pointer before we diddle with the cached PTE
	 * copy.
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);

	/*
	 * Change the protection of the page.
	 */
	oldlo = pvo->pvo_pte.lpte.pte_lo;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
	if ((prot & VM_PROT_EXECUTE) == 0)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
	if (prot & VM_PROT_WRITE)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
	else
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;

	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	/*
	 * If the PVO is in the page table, update that pte as well.
	 */
	if (pt != -1)
		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
		    pvo->pvo_vpn);
	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((pg->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(pg, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed and we have
	 * removed write access.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
}

void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_protect(mmu, pm, pvo, prot);
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}
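
/*
 * Note on the loop above: RB_NFIND() returns the first PVO whose virtual
 * address is >= sva, so the walk visits exactly the mappings in [sva, eva).
 * The successor is fetched before the entry is touched, mirroring
 * moea64_remove(), where entries are deleted mid-walk.
 */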

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs.
	 */
	#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
	#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
	#endif
}

/*
 * Remove all pages mapped by the specified pmap.
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct	pvo_entry *pvo, *tpvo;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
		if (!(pvo->pvo_vaddr & PVO_WIRED))
			moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	LOCK_TABLE_WR();
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_WR();
	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_aflag_clear(m, PGA_EXECUTABLE);
}
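
/*
 * Locking note: moea64_remove(), moea64_remove_pages() and
 * moea64_remove_all() above take the table lock exclusively
 * (LOCK_TABLE_WR) because they delete PVOs, whereas moea64_protect()
 * only modifies entries in place and can run under the read lock.
 */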

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	uint64_t vsid;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}
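
	/*
	 * The loop above deliberately treats a PVO with a matching physical
	 * page and identical LPTE_NOEXEC/LPTE_PP protection bits as already
	 * correct: a mapping whose PTE was merely evicted (spilled) from the
	 * page table is re-inserted rather than torn down and rebuilt.
	 */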

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_pvo_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		pvo = uma_zalloc(zone, M_NOWAIT);
	}

	if (pvo == NULL)
		return (ENOMEM);

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != NULL)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Add to pmap list.
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if this will be the first mapping on the page's list.
	 */
	if (pvo_head != NULL) {
		if (LIST_FIRST(pvo_head) == NULL)
			first = 1;
		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	}

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required; a failed insertion
	 * would normally just be counted as an overflow, but at present we
	 * panic so that such cases are caught.
	 */
	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
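
/*
 * Note the return convention above: ENOENT reports that this was the first
 * mapping entered for the page, not an error; callers that do not care,
 * such as moea64_kenter_attr(), treat it the same as success.
 */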

static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	struct	vm_page *pg;
	uintptr_t pt;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				PMAP_UNLOCK(pvo->pvo_pmap);
				UNLOCK_TABLE_RD();
				return (TRUE);
			}
		}
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (FALSE);
}
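
/*
 * moea64_query_bit() above checks the saved PTE copies first because REF/CHG
 * bits, once harvested into a PVO, stay there until explicitly cleared; only
 * when no saved copy has the bit set is it worth paying for powerpc_sync()
 * and a walk of the live page table entries.
 */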

static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	uintptr_t pt;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte.
	 */
	count = 0;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Round up from va + 1 so that a page-aligned va still
		 * yields a non-zero length and the loop makes progress.
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

vm_offset_t
moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	if (md->md_vaddr == ~0UL)
		return (md->md_paddr + ofs);
	else
		return (md->md_vaddr + ofs);
}
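
/*
 * moea64_scan_md() below is an iterator: the dump code passes NULL to get a
 * descriptor for the first memory chunk and the previously returned
 * descriptor to advance to the next one; a NULL return means no chunks
 * remain.  moea64_dumpsys_map() above then translates an offset within a
 * chunk into an address the dump code can read from.
 */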

struct pmap_md *
moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	struct pvo_entry *pvo;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
			md.md_size = round_page(msgbufp->msg_size);
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pvo = moea64_pvo_find_va(kernel_pmap,
				    va & ~ADDR_POFF);
				if (pvo != NULL &&
				    (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pvo = moea64_pvo_find_va(kernel_pmap,
					    va & ~ADDR_POFF);
					if (pvo == NULL ||
					    !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* full dump: walk the physical regions directly */
		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = pregions[0].mr_start;
			md.md_size = pregions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < pregions_sz) {
			md.md_paddr = pregions[md.md_index].mr_start;
			md.md_size = pregions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}
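
/*
 * A minimal sketch of the caller-side loop over the chunks; the
 * dump_chunk() consumer is hypothetical and not part of this file:
 *
 *	struct pmap_md *md;
 *
 *	for (md = moea64_scan_md(mmu, NULL); md != NULL;
 *	    md = moea64_scan_md(mmu, md))
 *		dump_chunk(md);
 */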