mmu_oea64.c revision 255418
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 255418 2013-09-09 12:51:24Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 * -- Read lock: if no modifications are being made to either the PVO lists
 *    or page table or if any modifications being made result in internal
 *    changes (e.g. wiring, protection) such that the existence of the PVOs
 *    is unchanged and they remain associated with the same pmap (in which
 *    case the changes should be protected by the pmap lock)
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD()		rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD()	rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR()		rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR()	rw_wunlock(&moea64_table_lock)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock	moea64_table_lock;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
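 *
 * Each pvo_entry describes a single virtual-to-physical mapping.  A PVO
 * appears on the per-PTEG overflow list below, in its owning pmap's RB
 * tree and, for managed pages, on the page's list of mappings.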
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define	VSID_HASHMASK	0xffffffffUL
#else
#define	NVSIDS		NPMAPS
#define	VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
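 * Orders Open Firmware translations by ascending physical address,
 * comparing the high word of the address first and then the low word.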
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	#ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	#else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	#endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
			    NULL, pa, pa, pte_lo,
			    PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space.  That code
		 * is used only in real mode, and leaving it unmapped allows
		 * us to catch NULL pointer dereferences, instead of making
		 * NULL a valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int	i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
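				/*
				 * The kernel ends inside this region: carve
				 * the tail beyond the kernel image into a
				 * new phys_avail entry.
				 */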
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
	    BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
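	 * (Real mode is detected here by the absence of an "mmu" instance
	 * handle under /chosen.)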
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE_RD();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE_RD();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = NULL;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = NULL;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	LOCK_TABLE_WR();
	PMAP_LOCK(pmap);
	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
	PMAP_UNLOCK(pmap);
	UNLOCK_TABLE_WR();

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
}

static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map.  Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	moea64_enter(mmu, pm, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
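 * Returns NULL if no such mapping exists or if it does not permit the
 * requested access.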
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	LOCK_TABLE_WR();
	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, PTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have LPTE_CHG set.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_reference: page %p is not managed", m));
	moea64_clear_bit(mmu, m, LPTE_REF);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("moea64_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	powerpc_sync();
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != -1) {
				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				MOEA64_PTE_CHANGE(mmu, pt,
				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		if ((lo & LPTE_CHG) != 0)
			vm_page_dirty(m);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(mmu, m, LPTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != -1) {
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	m->md.mdpg_cache_attrs = ma;
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	LOCK_TABLE_WR();
	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, pa, pte_lo, PVO_WIRED);
	PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
		    pa, error);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE_RD();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	UNLOCK_TABLE_RD();
	return (count);
}

static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* Anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments", __func__);
}

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{

	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
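/*
 * A minimal pmap lifecycle sketch tying the routines above and below
 * together (illustrative only; "pm" is a caller-supplied pmap):
 *
 *	moea64_pinit(mmu, pm);		allocate VSIDs / SLB state
 *	...enter, protect and remove mappings...
 *	moea64_remove_pages(mmu, pm);	drop remaining unwired mappings
 *	moea64_release(mmu, pm);	free VSIDs / SLB state
 */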
/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	uintptr_t pt;
	struct	vm_page *pg;
	uint64_t oldlo;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/*
	 * Grab the PTE pointer before we diddle with the cached PTE
	 * copy.
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);

	/*
	 * Change the protection of the page.
	 */
	oldlo = pvo->pvo_pte.lpte.pte_lo;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
	if ((prot & VM_PROT_EXECUTE) == 0)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
	if (prot & VM_PROT_WRITE)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
	else
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;

	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	/*
	 * If the PVO is in the page table, update that pte as well.
	 */
	if (pt != -1)
		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
		    pvo->pvo_vpn);
	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((pg->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(pg, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed and we have
	 * removed write access.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
}

void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non-current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_protect(mmu, pm, pvo, prot);
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs.
	 */
#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif
}

/*
 * Remove all pages mapped by the specified pmap.
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct	pvo_entry *pvo, *tpvo;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
		if (!(pvo->pvo_vaddr & PVO_WIRED))
			moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}
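/*
 * Note: moea64_remove_pages() deliberately skips wired (PVO_WIRED)
 * entries; callers are expected to unwire such mappings and remove them
 * separately, e.g. via moea64_remove() below.
 */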
/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in the PTEs back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	LOCK_TABLE_WR();
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_WR();
	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_aflag_clear(m, PGA_EXECUTABLE);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}
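/*
 * A worked example of the splitting above (illustrative numbers only):
 * allocating 0x2000 bytes at s = 0x3000 from an available region
 * [0x1000, 0x9000) matches neither endpoint, so the region is split in
 * place into [0x1000, 0x3000) and [0x5000, 0x9000) and phys_avail_count
 * is incremented.
 */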
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct	 pvo_entry *pvo;
	uint64_t vsid;
	int	 first;
	u_int	 ptegidx;
	int	 i;
	int	 bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		pvo = uma_zalloc(zone, M_NOWAIT);
	}

	if (pvo == NULL)
		return (ENOMEM);

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != NULL)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Add to pmap list.
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (pvo_head != NULL) {
		if (LIST_FIRST(pvo_head) == NULL)
			first = 1;
		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	}

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * Insert the PTE into the page table.  With no spill handling here,
	 * a failed insert is fatal, so the overflow counter need not be
	 * touched on this path.
	 */
	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0)
		PVO_PTEGIDX_SET(pvo, i);
	else
		panic("moea64_pvo_enter: overflow");

	if (pm == kernel_pmap)
		isync();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
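/*
 * Note the return convention above: ENOENT is not an error.  It reports
 * that the new PVO is the first mapping for the page, which is why
 * moea64_kenter_attr() earlier in this file treats ENOENT as success.
 */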
static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	struct	vm_page *pg;
	uintptr_t pt;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the REF & CHG bits).
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				PMAP_UNLOCK(pvo->pvo_pmap);
				UNLOCK_TABLE_RD();
				return (TRUE);
			}
		}
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (FALSE);
}
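/*
 * moea64_query_bit() and moea64_clear_bit() below back the generic
 * REF/CHG queries; for instance, moea64_ts_referenced() earlier in this
 * file is simply moea64_clear_bit(mmu, m, LPTE_REF).
 */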
static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	uintptr_t pt;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (va == 0)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Process at most one page per iteration.  Use
		 * round_page(va + 1) rather than round_page(va) so that a
		 * page-aligned va still makes forward progress instead of
		 * computing a zero-length chunk and looping forever.
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
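/*
 * A minimal device-mapping round trip using the routines above
 * (illustrative only; "pa" and "len" are caller-supplied):
 *
 *	void *p = moea64_mapdev(mmu, pa, len);
 *	...access the device registers through p...
 *	moea64_unmapdev(mmu, (vm_offset_t)p, len);
 */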