mmu_oea64.c revision 233949
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 233949 2012-04-06 16:03:38Z nwhitehorn $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_compat.h" 118#include "opt_kstack_pages.h" 119 120#include <sys/param.h> 121#include <sys/kernel.h> 122#include <sys/queue.h> 123#include <sys/cpuset.h> 124#include <sys/ktr.h> 125#include <sys/lock.h> 126#include <sys/msgbuf.h> 127#include <sys/mutex.h> 128#include <sys/proc.h> 129#include <sys/rwlock.h> 130#include <sys/sched.h> 131#include <sys/sysctl.h> 132#include <sys/systm.h> 133#include <sys/vmmeter.h> 134 135#include <sys/kdb.h> 136 137#include <dev/ofw/openfirm.h> 138 139#include <vm/vm.h> 140#include <vm/vm_param.h> 141#include <vm/vm_kern.h> 142#include <vm/vm_page.h> 143#include <vm/vm_map.h> 144#include <vm/vm_object.h> 145#include <vm/vm_extern.h> 146#include <vm/vm_pageout.h> 147#include <vm/vm_pager.h> 148#include <vm/uma.h> 149 150#include <machine/_inttypes.h> 151#include <machine/cpu.h> 152#include <machine/platform.h> 153#include <machine/frame.h> 154#include <machine/md_var.h> 155#include <machine/psl.h> 156#include <machine/bat.h> 157#include <machine/hid.h> 158#include <machine/pte.h> 159#include <machine/sr.h> 160#include <machine/trap.h> 161#include <machine/mmuvar.h> 162 163#include "mmu_oea64.h" 164#include "mmu_if.h" 165#include "moea64_if.h" 166 167void moea64_release_vsid(uint64_t vsid); 168uintptr_t moea64_get_unique_vsid(void); 169 170#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 171#define ENABLE_TRANS(msr) mtmsr(msr) 172 173#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 174#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 175#define VSID_HASH_MASK 0x0000007fffffffffULL 176 177/* 178 * Locking semantics: 179 * -- Read lock: if no modifications are being made to either the PVO lists 180 * or page table or if any modifications being made result in internal 181 * changes (e.g. wiring, protection) such that the existence of the PVOs 182 * is unchanged and they remain associated with the same pmap (in which 183 * case the changes should be protected by the pmap lock) 184 * -- Write lock: required if PTEs/PVOs are being inserted or removed. 185 */ 186 187#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock) 188#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock) 189#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock) 190#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock) 191 192struct ofw_map { 193 cell_t om_va; 194 cell_t om_len; 195 cell_t om_pa_hi; 196 cell_t om_pa_lo; 197 cell_t om_mode; 198}; 199 200/* 201 * Map of physical memory regions. 202 */ 203static struct mem_region *regions; 204static struct mem_region *pregions; 205static u_int phys_avail_count; 206static int regions_sz, pregions_sz; 207 208extern void bs_remap_earlyboot(void); 209 210/* 211 * Lock for the pteg and pvo tables. 212 */ 213struct rwlock moea64_table_lock; 214struct mtx moea64_slb_mutex; 215 216/* 217 * PTEG data. 218 */ 219u_int moea64_pteg_count; 220u_int moea64_pteg_mask; 221 222/* 223 * PVO data. 
224 */ 225struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 226struct pvo_head moea64_pvo_kunmanaged = /* list of unmanaged pages */ 227 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 228 229uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 230uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 231 232#define BPVO_POOL_SIZE 327680 233static struct pvo_entry *moea64_bpvo_pool; 234static int moea64_bpvo_pool_index = 0; 235 236#define VSID_NBPW (sizeof(u_int32_t) * 8) 237#ifdef __powerpc64__ 238#define NVSIDS (NPMAPS * 16) 239#define VSID_HASHMASK 0xffffffffUL 240#else 241#define NVSIDS NPMAPS 242#define VSID_HASHMASK 0xfffffUL 243#endif 244static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 245 246static boolean_t moea64_initialized = FALSE; 247 248/* 249 * Statistics. 250 */ 251u_int moea64_pte_valid = 0; 252u_int moea64_pte_overflow = 0; 253u_int moea64_pvo_entries = 0; 254u_int moea64_pvo_enter_calls = 0; 255u_int moea64_pvo_remove_calls = 0; 256SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 257 &moea64_pte_valid, 0, ""); 258SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 259 &moea64_pte_overflow, 0, ""); 260SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 261 &moea64_pvo_entries, 0, ""); 262SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 263 &moea64_pvo_enter_calls, 0, ""); 264SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 265 &moea64_pvo_remove_calls, 0, ""); 266 267vm_offset_t moea64_scratchpage_va[2]; 268struct pvo_entry *moea64_scratchpage_pvo[2]; 269uintptr_t moea64_scratchpage_pte[2]; 270struct mtx moea64_scratchpage_mtx; 271 272uint64_t moea64_large_page_mask = 0; 273int moea64_large_page_size = 0; 274int moea64_large_page_shift = 0; 275 276/* 277 * PVO calls. 278 */ 279static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *, 280 vm_offset_t, vm_offset_t, uint64_t, int); 281static void moea64_pvo_remove(mmu_t, struct pvo_entry *); 282static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 283 284/* 285 * Utility routines. 
286 */ 287static void moea64_enter_locked(mmu_t, pmap_t, vm_offset_t, 288 vm_page_t, vm_prot_t, boolean_t); 289static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t); 290static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t); 291static void moea64_kremove(mmu_t, vm_offset_t); 292static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 293 vm_offset_t pa, vm_size_t sz); 294 295/* 296 * Kernel MMU interface 297 */ 298void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 299void moea64_clear_modify(mmu_t, vm_page_t); 300void moea64_clear_reference(mmu_t, vm_page_t); 301void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 302void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 303void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 304 vm_prot_t); 305void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 306vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 307vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 308void moea64_init(mmu_t); 309boolean_t moea64_is_modified(mmu_t, vm_page_t); 310boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 311boolean_t moea64_is_referenced(mmu_t, vm_page_t); 312boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 313vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 314boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 315int moea64_page_wired_mappings(mmu_t, vm_page_t); 316void moea64_pinit(mmu_t, pmap_t); 317void moea64_pinit0(mmu_t, pmap_t); 318void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 319void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 320void moea64_qremove(mmu_t, vm_offset_t, int); 321void moea64_release(mmu_t, pmap_t); 322void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 323void moea64_remove_pages(mmu_t, pmap_t); 324void moea64_remove_all(mmu_t, vm_page_t); 325void moea64_remove_write(mmu_t, vm_page_t); 326void moea64_zero_page(mmu_t, vm_page_t); 327void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 328void moea64_zero_page_idle(mmu_t, vm_page_t); 329void moea64_activate(mmu_t, struct thread *); 330void moea64_deactivate(mmu_t, struct thread *); 331void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 332void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 333void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 334vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 335void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); 336void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma); 337void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 338boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 339static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 340 341static mmu_method_t moea64_methods[] = { 342 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 343 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 344 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 345 MMUMETHOD(mmu_copy_page, moea64_copy_page), 346 MMUMETHOD(mmu_enter, moea64_enter), 347 MMUMETHOD(mmu_enter_object, moea64_enter_object), 348 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 349 MMUMETHOD(mmu_extract, moea64_extract), 350 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 351 MMUMETHOD(mmu_init, moea64_init), 352 MMUMETHOD(mmu_is_modified, moea64_is_modified), 353 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), 354 MMUMETHOD(mmu_is_referenced, 
moea64_is_referenced), 355 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 356 MMUMETHOD(mmu_map, moea64_map), 357 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 358 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 359 MMUMETHOD(mmu_pinit, moea64_pinit), 360 MMUMETHOD(mmu_pinit0, moea64_pinit0), 361 MMUMETHOD(mmu_protect, moea64_protect), 362 MMUMETHOD(mmu_qenter, moea64_qenter), 363 MMUMETHOD(mmu_qremove, moea64_qremove), 364 MMUMETHOD(mmu_release, moea64_release), 365 MMUMETHOD(mmu_remove, moea64_remove), 366 MMUMETHOD(mmu_remove_pages, moea64_remove_pages), 367 MMUMETHOD(mmu_remove_all, moea64_remove_all), 368 MMUMETHOD(mmu_remove_write, moea64_remove_write), 369 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 370 MMUMETHOD(mmu_zero_page, moea64_zero_page), 371 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 372 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 373 MMUMETHOD(mmu_activate, moea64_activate), 374 MMUMETHOD(mmu_deactivate, moea64_deactivate), 375 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), 376 377 /* Internal interfaces */ 378 MMUMETHOD(mmu_mapdev, moea64_mapdev), 379 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), 380 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 381 MMUMETHOD(mmu_kextract, moea64_kextract), 382 MMUMETHOD(mmu_kenter, moea64_kenter), 383 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), 384 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 385 386 { 0, 0 } 387}; 388 389MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); 390 391static __inline u_int 392va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 393{ 394 uint64_t hash; 395 int shift; 396 397 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT; 398 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 399 shift); 400 return (hash & moea64_pteg_mask); 401} 402 403static __inline struct pvo_head * 404vm_page_to_pvoh(vm_page_t m) 405{ 406 407 return (&m->md.mdpg_pvoh); 408} 409 410static __inline void 411moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 412 uint64_t pte_lo, int flags) 413{ 414 415 /* 416 * Construct a PTE. Default to IMB initially. Valid bit only gets 417 * set when the real pte is set in memory. 418 * 419 * Note: Don't set the valid bit for correct operation of tlb update. 420 */ 421 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 422 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 423 424 if (flags & PVO_LARGE) 425 pt->pte_hi |= LPTE_BIG; 426 427 pt->pte_lo = pte_lo; 428} 429 430static __inline uint64_t 431moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 432{ 433 uint64_t pte_lo; 434 int i; 435 436 if (ma != VM_MEMATTR_DEFAULT) { 437 switch (ma) { 438 case VM_MEMATTR_UNCACHEABLE: 439 return (LPTE_I | LPTE_G); 440 case VM_MEMATTR_WRITE_COMBINING: 441 case VM_MEMATTR_WRITE_BACK: 442 case VM_MEMATTR_PREFETCHABLE: 443 return (LPTE_I); 444 case VM_MEMATTR_WRITE_THROUGH: 445 return (LPTE_W | LPTE_M); 446 } 447 } 448 449 /* 450 * Assume the page is cache inhibited and access is guarded unless 451 * it's in our available memory array. 452 */ 453 pte_lo = LPTE_I | LPTE_G; 454 for (i = 0; i < pregions_sz; i++) { 455 if ((pa >= pregions[i].mr_start) && 456 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 457 pte_lo &= ~(LPTE_I | LPTE_G); 458 pte_lo |= LPTE_M; 459 break; 460 } 461 } 462 463 return pte_lo; 464} 465 466/* 467 * Quick sort callout for comparing memory regions. 
468 */ 469static int om_cmp(const void *a, const void *b); 470 471static int 472om_cmp(const void *a, const void *b) 473{ 474 const struct ofw_map *mapa; 475 const struct ofw_map *mapb; 476 477 mapa = a; 478 mapb = b; 479 if (mapa->om_pa_hi < mapb->om_pa_hi) 480 return (-1); 481 else if (mapa->om_pa_hi > mapb->om_pa_hi) 482 return (1); 483 else if (mapa->om_pa_lo < mapb->om_pa_lo) 484 return (-1); 485 else if (mapa->om_pa_lo > mapb->om_pa_lo) 486 return (1); 487 else 488 return (0); 489} 490 491static void 492moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 493{ 494 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 495 register_t msr; 496 vm_offset_t off; 497 vm_paddr_t pa_base; 498 int i; 499 500 bzero(translations, sz); 501 if (OF_getprop(mmu, "translations", translations, sz) == -1) 502 panic("moea64_bootstrap: can't get ofw translations"); 503 504 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 505 sz /= sizeof(*translations); 506 qsort(translations, sz, sizeof (*translations), om_cmp); 507 508 for (i = 0; i < sz; i++) { 509 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 510 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 511 translations[i].om_len); 512 513 if (translations[i].om_pa_lo % PAGE_SIZE) 514 panic("OFW translation not page-aligned!"); 515 516 pa_base = translations[i].om_pa_lo; 517 518 #ifdef __powerpc64__ 519 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32; 520 #else 521 if (translations[i].om_pa_hi) 522 panic("OFW translations above 32-bit boundary!"); 523 #endif 524 525 /* Now enter the pages for this mapping */ 526 527 DISABLE_TRANS(msr); 528 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 529 if (moea64_pvo_find_va(kernel_pmap, 530 translations[i].om_va + off) != NULL) 531 continue; 532 533 moea64_kenter(mmup, translations[i].om_va + off, 534 pa_base + off); 535 } 536 ENABLE_TRANS(msr); 537 } 538} 539 540#ifdef __powerpc64__ 541static void 542moea64_probe_large_page(void) 543{ 544 uint16_t pvr = mfpvr() >> 16; 545 546 switch (pvr) { 547 case IBM970: 548 case IBM970FX: 549 case IBM970MP: 550 powerpc_sync(); isync(); 551 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 552 powerpc_sync(); isync(); 553 554 /* FALLTHROUGH */ 555 case IBMCELLBE: 556 moea64_large_page_size = 0x1000000; /* 16 MB */ 557 moea64_large_page_shift = 24; 558 break; 559 default: 560 moea64_large_page_size = 0; 561 } 562 563 moea64_large_page_mask = moea64_large_page_size - 1; 564} 565 566static void 567moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 568{ 569 struct slb *cache; 570 struct slb entry; 571 uint64_t esid, slbe; 572 uint64_t i; 573 574 cache = PCPU_GET(slb); 575 esid = va >> ADDR_SR_SHFT; 576 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 577 578 for (i = 0; i < 64; i++) { 579 if (cache[i].slbe == (slbe | i)) 580 return; 581 } 582 583 entry.slbe = slbe; 584 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT; 585 if (large) 586 entry.slbv |= SLBV_L; 587 588 slb_insert_kernel(entry.slbe, entry.slbv); 589} 590#endif 591 592static void 593moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, 594 vm_offset_t kernelend) 595{ 596 register_t msr; 597 vm_paddr_t pa; 598 vm_offset_t size, off; 599 uint64_t pte_lo; 600 int i; 601 602 if (moea64_large_page_size == 0) 603 hw_direct_map = 0; 604 605 DISABLE_TRANS(msr); 606 if (hw_direct_map) { 607 LOCK_TABLE_WR(); 608 PMAP_LOCK(kernel_pmap); 609 for (i = 0; i < pregions_sz; i++) { 610 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + 611 
pregions[i].mr_size; pa += moea64_large_page_size) { 612 pte_lo = LPTE_M; 613 614 /* 615 * Set memory access as guarded if prefetch within 616 * the page could exit the available physmem area. 617 */ 618 if (pa & moea64_large_page_mask) { 619 pa &= moea64_large_page_mask; 620 pte_lo |= LPTE_G; 621 } 622 if (pa + moea64_large_page_size > 623 pregions[i].mr_start + pregions[i].mr_size) 624 pte_lo |= LPTE_G; 625 626 moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone, 627 &moea64_pvo_kunmanaged, pa, pa, 628 pte_lo, PVO_WIRED | PVO_LARGE); 629 } 630 } 631 PMAP_UNLOCK(kernel_pmap); 632 UNLOCK_TABLE_WR(); 633 } else { 634 size = sizeof(struct pvo_head) * moea64_pteg_count; 635 off = (vm_offset_t)(moea64_pvo_table); 636 for (pa = off; pa < off + size; pa += PAGE_SIZE) 637 moea64_kenter(mmup, pa, pa); 638 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 639 off = (vm_offset_t)(moea64_bpvo_pool); 640 for (pa = off; pa < off + size; pa += PAGE_SIZE) 641 moea64_kenter(mmup, pa, pa); 642 643 /* 644 * Map certain important things, like ourselves. 645 * 646 * NOTE: We do not map the exception vector space. That code is 647 * used only in real mode, and leaving it unmapped allows us to 648 * catch NULL pointer deferences, instead of making NULL a valid 649 * address. 650 */ 651 652 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; 653 pa += PAGE_SIZE) 654 moea64_kenter(mmup, pa, pa); 655 } 656 ENABLE_TRANS(msr); 657} 658 659void 660moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 661{ 662 int i, j; 663 vm_size_t physsz, hwphyssz; 664 665#ifndef __powerpc64__ 666 /* We don't have a direct map since there is no BAT */ 667 hw_direct_map = 0; 668 669 /* Make sure battable is zero, since we have no BAT */ 670 for (i = 0; i < 16; i++) { 671 battable[i].batu = 0; 672 battable[i].batl = 0; 673 } 674#else 675 moea64_probe_large_page(); 676 677 /* Use a direct map if we have large page support */ 678 if (moea64_large_page_size > 0) 679 hw_direct_map = 1; 680 else 681 hw_direct_map = 0; 682#endif 683 684 /* Get physical memory regions from firmware */ 685 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 686 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 687 688 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 689 panic("moea64_bootstrap: phys_avail too small"); 690 691 phys_avail_count = 0; 692 physsz = 0; 693 hwphyssz = 0; 694 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 695 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 696 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 697 regions[i].mr_start + regions[i].mr_size, 698 regions[i].mr_size); 699 if (hwphyssz != 0 && 700 (physsz + regions[i].mr_size) >= hwphyssz) { 701 if (physsz < hwphyssz) { 702 phys_avail[j] = regions[i].mr_start; 703 phys_avail[j + 1] = regions[i].mr_start + 704 hwphyssz - physsz; 705 physsz = hwphyssz; 706 phys_avail_count++; 707 } 708 break; 709 } 710 phys_avail[j] = regions[i].mr_start; 711 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 712 phys_avail_count++; 713 physsz += regions[i].mr_size; 714 } 715 716 /* Check for overlap with the kernel and exception vectors */ 717 for (j = 0; j < 2*phys_avail_count; j+=2) { 718 if (phys_avail[j] < EXC_LAST) 719 phys_avail[j] += EXC_LAST; 720 721 if (kernelstart >= phys_avail[j] && 722 kernelstart < phys_avail[j+1]) { 723 if (kernelend < phys_avail[j+1]) { 724 phys_avail[2*phys_avail_count] = 725 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 726 phys_avail[2*phys_avail_count + 1] = 727 phys_avail[j+1]; 728 
phys_avail_count++; 729 } 730 731 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 732 } 733 734 if (kernelend >= phys_avail[j] && 735 kernelend < phys_avail[j+1]) { 736 if (kernelstart > phys_avail[j]) { 737 phys_avail[2*phys_avail_count] = phys_avail[j]; 738 phys_avail[2*phys_avail_count + 1] = 739 kernelstart & ~PAGE_MASK; 740 phys_avail_count++; 741 } 742 743 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 744 } 745 } 746 747 physmem = btoc(physsz); 748 749#ifdef PTEGCOUNT 750 moea64_pteg_count = PTEGCOUNT; 751#else 752 moea64_pteg_count = 0x1000; 753 754 while (moea64_pteg_count < physmem) 755 moea64_pteg_count <<= 1; 756 757 moea64_pteg_count >>= 1; 758#endif /* PTEGCOUNT */ 759} 760 761void 762moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 763{ 764 vm_size_t size; 765 register_t msr; 766 int i; 767 768 /* 769 * Set PTEG mask 770 */ 771 moea64_pteg_mask = moea64_pteg_count - 1; 772 773 /* 774 * Allocate pv/overflow lists. 775 */ 776 size = sizeof(struct pvo_head) * moea64_pteg_count; 777 778 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 779 PAGE_SIZE); 780 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 781 782 DISABLE_TRANS(msr); 783 for (i = 0; i < moea64_pteg_count; i++) 784 LIST_INIT(&moea64_pvo_table[i]); 785 ENABLE_TRANS(msr); 786 787 /* 788 * Initialize the lock that synchronizes access to the pteg and pvo 789 * tables. 790 */ 791 rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE); 792 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); 793 794 /* 795 * Initialise the unmanaged pvo pool. 796 */ 797 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 798 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 799 moea64_bpvo_pool_index = 0; 800 801 /* 802 * Make sure kernel vsid is allocated as well as VSID 0. 803 */ 804 #ifndef __powerpc64__ 805 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 806 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 807 moea64_vsid_bitmap[0] |= 1; 808 #endif 809 810 /* 811 * Initialize the kernel pmap (which is statically allocated). 812 */ 813 #ifdef __powerpc64__ 814 for (i = 0; i < 64; i++) { 815 pcpup->pc_slb[i].slbv = 0; 816 pcpup->pc_slb[i].slbe = 0; 817 } 818 #else 819 for (i = 0; i < 16; i++) 820 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 821 #endif 822 823 kernel_pmap->pmap_phys = kernel_pmap; 824 CPU_FILL(&kernel_pmap->pm_active); 825 LIST_INIT(&kernel_pmap->pmap_pvo); 826 827 PMAP_LOCK_INIT(kernel_pmap); 828 829 /* 830 * Now map in all the other buffers we allocated earlier 831 */ 832 833 moea64_setup_direct_map(mmup, kernelstart, kernelend); 834} 835 836void 837moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 838{ 839 ihandle_t mmui; 840 phandle_t chosen; 841 phandle_t mmu; 842 size_t sz; 843 int i; 844 vm_offset_t pa, va; 845 void *dpcpu; 846 847 /* 848 * Set up the Open Firmware pmap and add its mappings if not in real 849 * mode. 850 */ 851 852 chosen = OF_finddevice("/chosen"); 853 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) { 854 mmu = OF_instance_to_package(mmui); 855 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1) 856 sz = 0; 857 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 858 panic("moea64_bootstrap: too many ofw translations"); 859 860 if (sz > 0) 861 moea64_add_ofw_mappings(mmup, mmu, sz); 862 } 863 864 /* 865 * Calculate the last available physical address. 
866 */ 867 for (i = 0; phys_avail[i + 2] != 0; i += 2) 868 ; 869 Maxmem = powerpc_btop(phys_avail[i + 1]); 870 871 /* 872 * Initialize MMU and remap early physical mappings 873 */ 874 MMU_CPU_BOOTSTRAP(mmup,0); 875 mtmsr(mfmsr() | PSL_DR | PSL_IR); 876 pmap_bootstrapped++; 877 bs_remap_earlyboot(); 878 879 /* 880 * Set the start and end of kva. 881 */ 882 virtual_avail = VM_MIN_KERNEL_ADDRESS; 883 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 884 885 /* 886 * Map the entire KVA range into the SLB. We must not fault there. 887 */ 888 #ifdef __powerpc64__ 889 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 890 moea64_bootstrap_slb_prefault(va, 0); 891 #endif 892 893 /* 894 * Figure out how far we can extend virtual_end into segment 16 895 * without running into existing mappings. Segment 16 is guaranteed 896 * to contain neither RAM nor devices (at least on Apple hardware), 897 * but will generally contain some OFW mappings we should not 898 * step on. 899 */ 900 901 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 902 PMAP_LOCK(kernel_pmap); 903 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 904 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 905 virtual_end += PAGE_SIZE; 906 PMAP_UNLOCK(kernel_pmap); 907 #endif 908 909 /* 910 * Allocate a kernel stack with a guard page for thread0 and map it 911 * into the kernel page map. 912 */ 913 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 914 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 915 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 916 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 917 thread0.td_kstack = va; 918 thread0.td_kstack_pages = KSTACK_PAGES; 919 for (i = 0; i < KSTACK_PAGES; i++) { 920 moea64_kenter(mmup, va, pa); 921 pa += PAGE_SIZE; 922 va += PAGE_SIZE; 923 } 924 925 /* 926 * Allocate virtual address space for the message buffer. 927 */ 928 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); 929 msgbufp = (struct msgbuf *)virtual_avail; 930 va = virtual_avail; 931 virtual_avail += round_page(msgbufsize); 932 while (va < virtual_avail) { 933 moea64_kenter(mmup, va, pa); 934 pa += PAGE_SIZE; 935 va += PAGE_SIZE; 936 } 937 938 /* 939 * Allocate virtual address space for the dynamic percpu area. 940 */ 941 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 942 dpcpu = (void *)virtual_avail; 943 va = virtual_avail; 944 virtual_avail += DPCPU_SIZE; 945 while (va < virtual_avail) { 946 moea64_kenter(mmup, va, pa); 947 pa += PAGE_SIZE; 948 va += PAGE_SIZE; 949 } 950 dpcpu_init(dpcpu, 0); 951 952 /* 953 * Allocate some things for page zeroing. We put this directly 954 * in the page table, marked with LPTE_LOCKED, to avoid any 955 * of the PVO book-keeping or other parts of the VM system 956 * from even knowing that this hack exists. 
957 */ 958 959 if (!hw_direct_map) { 960 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 961 MTX_DEF); 962 for (i = 0; i < 2; i++) { 963 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 964 virtual_end -= PAGE_SIZE; 965 966 moea64_kenter(mmup, moea64_scratchpage_va[i], 0); 967 968 moea64_scratchpage_pvo[i] = moea64_pvo_find_va( 969 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); 970 LOCK_TABLE_RD(); 971 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE( 972 mmup, moea64_scratchpage_pvo[i]); 973 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi 974 |= LPTE_LOCKED; 975 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i], 976 &moea64_scratchpage_pvo[i]->pvo_pte.lpte, 977 moea64_scratchpage_pvo[i]->pvo_vpn); 978 UNLOCK_TABLE_RD(); 979 } 980 } 981} 982 983/* 984 * Activate a user pmap. The pmap must be activated before its address 985 * space can be accessed in any way. 986 */ 987void 988moea64_activate(mmu_t mmu, struct thread *td) 989{ 990 pmap_t pm; 991 992 pm = &td->td_proc->p_vmspace->vm_pmap; 993 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 994 995 #ifdef __powerpc64__ 996 PCPU_SET(userslb, pm->pm_slb); 997 #else 998 PCPU_SET(curpmap, pm->pmap_phys); 999 #endif 1000} 1001 1002void 1003moea64_deactivate(mmu_t mmu, struct thread *td) 1004{ 1005 pmap_t pm; 1006 1007 pm = &td->td_proc->p_vmspace->vm_pmap; 1008 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); 1009 #ifdef __powerpc64__ 1010 PCPU_SET(userslb, NULL); 1011 #else 1012 PCPU_SET(curpmap, NULL); 1013 #endif 1014} 1015 1016void 1017moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1018{ 1019 struct pvo_entry *pvo; 1020 uintptr_t pt; 1021 uint64_t vsid; 1022 int i, ptegidx; 1023 1024 LOCK_TABLE_WR(); 1025 PMAP_LOCK(pm); 1026 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1027 1028 if (pvo != NULL) { 1029 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1030 1031 if (wired) { 1032 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1033 pm->pm_stats.wired_count++; 1034 pvo->pvo_vaddr |= PVO_WIRED; 1035 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1036 } else { 1037 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1038 pm->pm_stats.wired_count--; 1039 pvo->pvo_vaddr &= ~PVO_WIRED; 1040 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1041 } 1042 1043 if (pt != -1) { 1044 /* Update wiring flag in page table. */ 1045 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1046 pvo->pvo_vpn); 1047 } else if (wired) { 1048 /* 1049 * If we are wiring the page, and it wasn't in the 1050 * page table before, add it. 1051 */ 1052 vsid = PVO_VSID(pvo); 1053 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1054 pvo->pvo_vaddr & PVO_LARGE); 1055 1056 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 1057 1058 if (i >= 0) { 1059 PVO_PTEGIDX_CLR(pvo); 1060 PVO_PTEGIDX_SET(pvo, i); 1061 } 1062 } 1063 1064 } 1065 UNLOCK_TABLE_WR(); 1066 PMAP_UNLOCK(pm); 1067} 1068 1069/* 1070 * This goes through and sets the physical address of our 1071 * special scratch PTE to the PA we want to zero or copy. 
Because 1072 * of locking issues (this can get called in pvo_enter() by 1073 * the UMA allocator), we can't use most other utility functions here 1074 */ 1075 1076static __inline 1077void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { 1078 1079 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1080 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1081 1082 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1083 ~(LPTE_WIMG | LPTE_RPGN); 1084 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1085 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1086 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which], 1087 &moea64_scratchpage_pvo[which]->pvo_pte.lpte, 1088 moea64_scratchpage_pvo[which]->pvo_vpn); 1089 isync(); 1090} 1091 1092void 1093moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1094{ 1095 vm_offset_t dst; 1096 vm_offset_t src; 1097 1098 dst = VM_PAGE_TO_PHYS(mdst); 1099 src = VM_PAGE_TO_PHYS(msrc); 1100 1101 if (hw_direct_map) { 1102 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1103 } else { 1104 mtx_lock(&moea64_scratchpage_mtx); 1105 1106 moea64_set_scratchpage_pa(mmu, 0, src); 1107 moea64_set_scratchpage_pa(mmu, 1, dst); 1108 1109 kcopy((void *)moea64_scratchpage_va[0], 1110 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1111 1112 mtx_unlock(&moea64_scratchpage_mtx); 1113 } 1114} 1115 1116void 1117moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1118{ 1119 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1120 1121 if (size + off > PAGE_SIZE) 1122 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1123 1124 if (hw_direct_map) { 1125 bzero((caddr_t)pa + off, size); 1126 } else { 1127 mtx_lock(&moea64_scratchpage_mtx); 1128 moea64_set_scratchpage_pa(mmu, 0, pa); 1129 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1130 mtx_unlock(&moea64_scratchpage_mtx); 1131 } 1132} 1133 1134/* 1135 * Zero a page of physical memory by temporarily mapping it 1136 */ 1137void 1138moea64_zero_page(mmu_t mmu, vm_page_t m) 1139{ 1140 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1141 vm_offset_t va, off; 1142 1143 if (!hw_direct_map) { 1144 mtx_lock(&moea64_scratchpage_mtx); 1145 1146 moea64_set_scratchpage_pa(mmu, 0, pa); 1147 va = moea64_scratchpage_va[0]; 1148 } else { 1149 va = pa; 1150 } 1151 1152 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1153 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1154 1155 if (!hw_direct_map) 1156 mtx_unlock(&moea64_scratchpage_mtx); 1157} 1158 1159void 1160moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1161{ 1162 1163 moea64_zero_page(mmu, m); 1164} 1165 1166/* 1167 * Map the given physical page at the specified virtual address in the 1168 * target pmap with the protection requested. If specified the page 1169 * will be wired down. 1170 */ 1171void 1172moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1173 vm_prot_t prot, boolean_t wired) 1174{ 1175 1176 LOCK_TABLE_WR(); 1177 PMAP_LOCK(pmap); 1178 moea64_enter_locked(mmu, pmap, va, m, prot, wired); 1179 UNLOCK_TABLE_WR(); 1180 PMAP_UNLOCK(pmap); 1181} 1182 1183/* 1184 * Map the given physical page at the specified virtual address in the 1185 * target pmap with the protection requested. If specified the page 1186 * will be wired down. 1187 * 1188 * The table (write) and pmap must be locked. 
1189 */ 1190 1191static void 1192moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1193 vm_prot_t prot, boolean_t wired) 1194{ 1195 struct pvo_head *pvo_head; 1196 uma_zone_t zone; 1197 vm_page_t pg; 1198 uint64_t pte_lo; 1199 u_int pvo_flags; 1200 int error; 1201 1202 if (!moea64_initialized) { 1203 pvo_head = &moea64_pvo_kunmanaged; 1204 pg = NULL; 1205 zone = moea64_upvo_zone; 1206 pvo_flags = 0; 1207 } else { 1208 pvo_head = vm_page_to_pvoh(m); 1209 pg = m; 1210 zone = moea64_mpvo_zone; 1211 pvo_flags = PVO_MANAGED; 1212 } 1213 1214 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1215 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 1216 VM_OBJECT_LOCKED(m->object), 1217 ("moea64_enter_locked: page %p is not busy", m)); 1218 1219 /* XXX change the pvo head for fake pages */ 1220 if ((m->oflags & VPO_UNMANAGED) != 0) { 1221 pvo_flags &= ~PVO_MANAGED; 1222 pvo_head = &moea64_pvo_kunmanaged; 1223 zone = moea64_upvo_zone; 1224 } 1225 1226 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1227 1228 if (prot & VM_PROT_WRITE) { 1229 pte_lo |= LPTE_BW; 1230 if (pmap_bootstrapped && 1231 (m->oflags & VPO_UNMANAGED) == 0) 1232 vm_page_aflag_set(m, PGA_WRITEABLE); 1233 } else 1234 pte_lo |= LPTE_BR; 1235 1236 if ((prot & VM_PROT_EXECUTE) == 0) 1237 pte_lo |= LPTE_NOEXEC; 1238 1239 if (wired) 1240 pvo_flags |= PVO_WIRED; 1241 1242 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, 1243 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); 1244 1245 /* 1246 * Flush the page from the instruction cache if this page is 1247 * mapped executable and cacheable. 1248 */ 1249 if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && 1250 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1251 vm_page_aflag_set(m, PGA_EXECUTABLE); 1252 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1253 } 1254} 1255 1256static void 1257moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, 1258 vm_size_t sz) 1259{ 1260 1261 /* 1262 * This is much trickier than on older systems because 1263 * we can't sync the icache on physical addresses directly 1264 * without a direct map. Instead we check a couple of cases 1265 * where the memory is already mapped in and, failing that, 1266 * use the same trick we use for page zeroing to create 1267 * a temporary mapping for this physical address. 1268 */ 1269 1270 if (!pmap_bootstrapped) { 1271 /* 1272 * If PMAP is not bootstrapped, we are likely to be 1273 * in real mode. 1274 */ 1275 __syncicache((void *)pa, sz); 1276 } else if (pmap == kernel_pmap) { 1277 __syncicache((void *)va, sz); 1278 } else if (hw_direct_map) { 1279 __syncicache((void *)pa, sz); 1280 } else { 1281 /* Use the scratch page to set up a temp mapping */ 1282 1283 mtx_lock(&moea64_scratchpage_mtx); 1284 1285 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1286 __syncicache((void *)(moea64_scratchpage_va[1] + 1287 (va & ADDR_POFF)), sz); 1288 1289 mtx_unlock(&moea64_scratchpage_mtx); 1290 } 1291} 1292 1293/* 1294 * Maps a sequence of resident pages belonging to the same object. 1295 * The sequence begins with the given page m_start. This page is 1296 * mapped at the given virtual address start. Each subsequent page is 1297 * mapped at a virtual address that is offset from start by the same 1298 * amount as the page is offset from m_start within the object. The 1299 * last page in the sequence is the page with the largest offset from 1300 * m_start that can be mapped at a virtual address less than the given 1301 * virtual address end. 
Not every virtual page between start and end 1302 * is mapped; only those for which a resident page exists with the 1303 * corresponding offset from m_start are mapped. 1304 */ 1305void 1306moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1307 vm_page_t m_start, vm_prot_t prot) 1308{ 1309 vm_page_t m; 1310 vm_pindex_t diff, psize; 1311 1312 psize = atop(end - start); 1313 m = m_start; 1314 LOCK_TABLE_WR(); 1315 PMAP_LOCK(pm); 1316 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1317 moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot & 1318 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1319 m = TAILQ_NEXT(m, listq); 1320 } 1321 UNLOCK_TABLE_WR(); 1322 PMAP_UNLOCK(pm); 1323} 1324 1325void 1326moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1327 vm_prot_t prot) 1328{ 1329 1330 LOCK_TABLE_WR(); 1331 PMAP_LOCK(pm); 1332 moea64_enter_locked(mmu, pm, va, m, 1333 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1334 UNLOCK_TABLE_WR(); 1335 PMAP_UNLOCK(pm); 1336} 1337 1338vm_paddr_t 1339moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1340{ 1341 struct pvo_entry *pvo; 1342 vm_paddr_t pa; 1343 1344 LOCK_TABLE_RD(); 1345 PMAP_LOCK(pm); 1346 pvo = moea64_pvo_find_va(pm, va); 1347 if (pvo == NULL) 1348 pa = 0; 1349 else 1350 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1351 (va - PVO_VADDR(pvo)); 1352 UNLOCK_TABLE_RD(); 1353 PMAP_UNLOCK(pm); 1354 return (pa); 1355} 1356 1357/* 1358 * Atomically extract and hold the physical page with the given 1359 * pmap and virtual address pair if that mapping permits the given 1360 * protection. 1361 */ 1362vm_page_t 1363moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1364{ 1365 struct pvo_entry *pvo; 1366 vm_page_t m; 1367 vm_paddr_t pa; 1368 1369 m = NULL; 1370 pa = 0; 1371 LOCK_TABLE_RD(); 1372 PMAP_LOCK(pmap); 1373retry: 1374 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1375 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1376 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1377 (prot & VM_PROT_WRITE) == 0)) { 1378 if (vm_page_pa_tryrelock(pmap, 1379 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1380 goto retry; 1381 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1382 vm_page_hold(m); 1383 } 1384 PA_UNLOCK_COND(pa); 1385 UNLOCK_TABLE_RD(); 1386 PMAP_UNLOCK(pmap); 1387 return (m); 1388} 1389 1390static mmu_t installed_mmu; 1391 1392static void * 1393moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1394{ 1395 /* 1396 * This entire routine is a horrible hack to avoid bothering kmem 1397 * for new KVA addresses. Because this can get called from inside 1398 * kmem allocation routines, calling kmem for a new address here 1399 * can lead to multiply locking non-recursive mutexes. 
1400 */ 1401 vm_offset_t va; 1402 1403 vm_page_t m; 1404 int pflags, needed_lock; 1405 1406 *flags = UMA_SLAB_PRIV; 1407 needed_lock = !PMAP_LOCKED(kernel_pmap); 1408 1409 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1410 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1411 else 1412 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1413 if (wait & M_ZERO) 1414 pflags |= VM_ALLOC_ZERO; 1415 1416 for (;;) { 1417 m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); 1418 if (m == NULL) { 1419 if (wait & M_NOWAIT) 1420 return (NULL); 1421 VM_WAIT; 1422 } else 1423 break; 1424 } 1425 1426 va = VM_PAGE_TO_PHYS(m); 1427 1428 LOCK_TABLE_WR(); 1429 if (needed_lock) 1430 PMAP_LOCK(kernel_pmap); 1431 1432 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone, 1433 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1434 PVO_WIRED | PVO_BOOTSTRAP); 1435 1436 if (needed_lock) 1437 PMAP_UNLOCK(kernel_pmap); 1438 UNLOCK_TABLE_WR(); 1439 1440 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1441 bzero((void *)va, PAGE_SIZE); 1442 1443 return (void *)va; 1444} 1445 1446extern int elf32_nxstack; 1447 1448void 1449moea64_init(mmu_t mmu) 1450{ 1451 1452 CTR0(KTR_PMAP, "moea64_init"); 1453 1454 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1455 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1456 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1457 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1458 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1459 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1460 1461 if (!hw_direct_map) { 1462 installed_mmu = mmu; 1463 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1464 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1465 } 1466 1467#ifdef COMPAT_FREEBSD32 1468 elf32_nxstack = 1; 1469#endif 1470 1471 moea64_initialized = TRUE; 1472} 1473 1474boolean_t 1475moea64_is_referenced(mmu_t mmu, vm_page_t m) 1476{ 1477 1478 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1479 ("moea64_is_referenced: page %p is not managed", m)); 1480 return (moea64_query_bit(mmu, m, PTE_REF)); 1481} 1482 1483boolean_t 1484moea64_is_modified(mmu_t mmu, vm_page_t m) 1485{ 1486 1487 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1488 ("moea64_is_modified: page %p is not managed", m)); 1489 1490 /* 1491 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 1492 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1493 * is clear, no PTEs can have LPTE_CHG set. 
1494 */ 1495 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1496 if ((m->oflags & VPO_BUSY) == 0 && 1497 (m->aflags & PGA_WRITEABLE) == 0) 1498 return (FALSE); 1499 return (moea64_query_bit(mmu, m, LPTE_CHG)); 1500} 1501 1502boolean_t 1503moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1504{ 1505 struct pvo_entry *pvo; 1506 boolean_t rv; 1507 1508 LOCK_TABLE_RD(); 1509 PMAP_LOCK(pmap); 1510 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1511 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0; 1512 PMAP_UNLOCK(pmap); 1513 UNLOCK_TABLE_RD(); 1514 return (rv); 1515} 1516 1517void 1518moea64_clear_reference(mmu_t mmu, vm_page_t m) 1519{ 1520 1521 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1522 ("moea64_clear_reference: page %p is not managed", m)); 1523 moea64_clear_bit(mmu, m, LPTE_REF); 1524} 1525 1526void 1527moea64_clear_modify(mmu_t mmu, vm_page_t m) 1528{ 1529 1530 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1531 ("moea64_clear_modify: page %p is not managed", m)); 1532 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1533 KASSERT((m->oflags & VPO_BUSY) == 0, 1534 ("moea64_clear_modify: page %p is busy", m)); 1535 1536 /* 1537 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG 1538 * set. If the object containing the page is locked and the page is 1539 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 1540 */ 1541 if ((m->aflags & PGA_WRITEABLE) == 0) 1542 return; 1543 moea64_clear_bit(mmu, m, LPTE_CHG); 1544} 1545 1546/* 1547 * Clear the write and modified bits in each of the given page's mappings. 1548 */ 1549void 1550moea64_remove_write(mmu_t mmu, vm_page_t m) 1551{ 1552 struct pvo_entry *pvo; 1553 uintptr_t pt; 1554 pmap_t pmap; 1555 uint64_t lo = 0; 1556 1557 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1558 ("moea64_remove_write: page %p is not managed", m)); 1559 1560 /* 1561 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 1562 * another thread while the object is locked. Thus, if PGA_WRITEABLE 1563 * is clear, no page table entries need updating. 1564 */ 1565 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1566 if ((m->oflags & VPO_BUSY) == 0 && 1567 (m->aflags & PGA_WRITEABLE) == 0) 1568 return; 1569 powerpc_sync(); 1570 LOCK_TABLE_RD(); 1571 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1572 pmap = pvo->pvo_pmap; 1573 PMAP_LOCK(pmap); 1574 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1575 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1576 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1577 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1578 if (pt != -1) { 1579 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 1580 lo |= pvo->pvo_pte.lpte.pte_lo; 1581 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1582 MOEA64_PTE_CHANGE(mmu, pt, 1583 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 1584 if (pvo->pvo_pmap == kernel_pmap) 1585 isync(); 1586 } 1587 } 1588 if ((lo & LPTE_CHG) != 0) 1589 vm_page_dirty(m); 1590 PMAP_UNLOCK(pmap); 1591 } 1592 UNLOCK_TABLE_RD(); 1593 vm_page_aflag_clear(m, PGA_WRITEABLE); 1594} 1595 1596/* 1597 * moea64_ts_referenced: 1598 * 1599 * Return a count of reference bits for a page, clearing those bits. 1600 * It is not necessary for every reference bit to be cleared, but it 1601 * is necessary that 0 only be returned when there are truly no 1602 * reference bits set. 1603 * 1604 * XXX: The exact number of bits to check and clear is a matter that 1605 * should be tested and standardized at some point in the future for 1606 * optimal aging of shared pages. 
1607 */ 1608boolean_t 1609moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1610{ 1611 1612 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1613 ("moea64_ts_referenced: page %p is not managed", m)); 1614 return (moea64_clear_bit(mmu, m, LPTE_REF)); 1615} 1616 1617/* 1618 * Modify the WIMG settings of all mappings for a page. 1619 */ 1620void 1621moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1622{ 1623 struct pvo_entry *pvo; 1624 struct pvo_head *pvo_head; 1625 uintptr_t pt; 1626 pmap_t pmap; 1627 uint64_t lo; 1628 1629 if ((m->oflags & VPO_UNMANAGED) != 0) { 1630 m->md.mdpg_cache_attrs = ma; 1631 return; 1632 } 1633 1634 pvo_head = vm_page_to_pvoh(m); 1635 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1636 LOCK_TABLE_RD(); 1637 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1638 pmap = pvo->pvo_pmap; 1639 PMAP_LOCK(pmap); 1640 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1641 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG; 1642 pvo->pvo_pte.lpte.pte_lo |= lo; 1643 if (pt != -1) { 1644 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1645 pvo->pvo_vpn); 1646 if (pvo->pvo_pmap == kernel_pmap) 1647 isync(); 1648 } 1649 PMAP_UNLOCK(pmap); 1650 } 1651 UNLOCK_TABLE_RD(); 1652 m->md.mdpg_cache_attrs = ma; 1653} 1654 1655/* 1656 * Map a wired page into kernel virtual address space. 1657 */ 1658void 1659moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1660{ 1661 uint64_t pte_lo; 1662 int error; 1663 1664 pte_lo = moea64_calc_wimg(pa, ma); 1665 1666 LOCK_TABLE_WR(); 1667 PMAP_LOCK(kernel_pmap); 1668 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone, 1669 &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1670 PMAP_UNLOCK(kernel_pmap); 1671 UNLOCK_TABLE_WR(); 1672 1673 if (error != 0 && error != ENOENT) 1674 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1675 pa, error); 1676} 1677 1678void 1679moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1680{ 1681 1682 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1683} 1684 1685/* 1686 * Extract the physical page address associated with the given kernel virtual 1687 * address. 1688 */ 1689vm_offset_t 1690moea64_kextract(mmu_t mmu, vm_offset_t va) 1691{ 1692 struct pvo_entry *pvo; 1693 vm_paddr_t pa; 1694 1695 /* 1696 * Shortcut the direct-mapped case when applicable. We never put 1697 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1698 */ 1699 if (va < VM_MIN_KERNEL_ADDRESS) 1700 return (va); 1701 1702 LOCK_TABLE_RD(); 1703 PMAP_LOCK(kernel_pmap); 1704 pvo = moea64_pvo_find_va(kernel_pmap, va); 1705 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1706 va)); 1707 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo)); 1708 UNLOCK_TABLE_RD(); 1709 PMAP_UNLOCK(kernel_pmap); 1710 return (pa); 1711} 1712 1713/* 1714 * Remove a wired page from kernel virtual address space. 1715 */ 1716void 1717moea64_kremove(mmu_t mmu, vm_offset_t va) 1718{ 1719 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1720} 1721 1722/* 1723 * Map a range of physical addresses into kernel virtual address space. 1724 * 1725 * The value passed in *virt is a suggested virtual address for the mapping. 1726 * Architectures which can support a direct-mapped physical to virtual region 1727 * can return the appropriate address within that region, leaving '*virt' 1728 * unchanged. We cannot and therefore do not; *virt is updated with the 1729 * first usable address after the mapped region. 
1730 */ 1731vm_offset_t 1732moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1733 vm_offset_t pa_end, int prot) 1734{ 1735 vm_offset_t sva, va; 1736 1737 sva = *virt; 1738 va = sva; 1739 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1740 moea64_kenter(mmu, va, pa_start); 1741 *virt = va; 1742 1743 return (sva); 1744} 1745 1746/* 1747 * Returns true if the pmap's pv is one of the first 1748 * 16 pvs linked to from this page. This count may 1749 * be changed upwards or downwards in the future; it 1750 * is only necessary that true be returned for a small 1751 * subset of pmaps for proper page aging. 1752 */ 1753boolean_t 1754moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1755{ 1756 int loops; 1757 struct pvo_entry *pvo; 1758 boolean_t rv; 1759 1760 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1761 ("moea64_page_exists_quick: page %p is not managed", m)); 1762 loops = 0; 1763 rv = FALSE; 1764 LOCK_TABLE_RD(); 1765 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1766 if (pvo->pvo_pmap == pmap) { 1767 rv = TRUE; 1768 break; 1769 } 1770 if (++loops >= 16) 1771 break; 1772 } 1773 UNLOCK_TABLE_RD(); 1774 return (rv); 1775} 1776 1777/* 1778 * Return the number of managed mappings to the given physical page 1779 * that are wired. 1780 */ 1781int 1782moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1783{ 1784 struct pvo_entry *pvo; 1785 int count; 1786 1787 count = 0; 1788 if ((m->oflags & VPO_UNMANAGED) != 0) 1789 return (count); 1790 LOCK_TABLE_RD(); 1791 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1792 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1793 count++; 1794 UNLOCK_TABLE_RD(); 1795 return (count); 1796} 1797 1798static uintptr_t moea64_vsidcontext; 1799 1800uintptr_t 1801moea64_get_unique_vsid(void) { 1802 u_int entropy; 1803 register_t hash; 1804 uint32_t mask; 1805 int i; 1806 1807 entropy = 0; 1808 __asm __volatile("mftb %0" : "=r"(entropy)); 1809 1810 mtx_lock(&moea64_slb_mutex); 1811 for (i = 0; i < NVSIDS; i += VSID_NBPW) { 1812 u_int n; 1813 1814 /* 1815 * Create a new value by mutiplying by a prime and adding in 1816 * entropy from the timebase register. This is to make the 1817 * VSID more random so that the PT hash function collides 1818 * less often. (Note that the prime casues gcc to do shifts 1819 * instead of a multiply.) 1820 */ 1821 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1822 hash = moea64_vsidcontext & (NVSIDS - 1); 1823 if (hash == 0) /* 0 is special, avoid it */ 1824 continue; 1825 n = hash >> 5; 1826 mask = 1 << (hash & (VSID_NBPW - 1)); 1827 hash = (moea64_vsidcontext & VSID_HASHMASK); 1828 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1829 /* anything free in this bucket? 
*/ 1830 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1831 entropy = (moea64_vsidcontext >> 20); 1832 continue; 1833 } 1834 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1835 mask = 1 << i; 1836 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 1837 hash |= i; 1838 } 1839 KASSERT(!(moea64_vsid_bitmap[n] & mask), 1840 ("Allocating in-use VSID %#zx\n", hash)); 1841 moea64_vsid_bitmap[n] |= mask; 1842 mtx_unlock(&moea64_slb_mutex); 1843 return (hash); 1844 } 1845 1846 mtx_unlock(&moea64_slb_mutex); 1847 panic("%s: out of segments",__func__); 1848} 1849 1850#ifdef __powerpc64__ 1851void 1852moea64_pinit(mmu_t mmu, pmap_t pmap) 1853{ 1854 PMAP_LOCK_INIT(pmap); 1855 LIST_INIT(&pmap->pmap_pvo); 1856 1857 pmap->pm_slb_tree_root = slb_alloc_tree(); 1858 pmap->pm_slb = slb_alloc_user_cache(); 1859 pmap->pm_slb_len = 0; 1860} 1861#else 1862void 1863moea64_pinit(mmu_t mmu, pmap_t pmap) 1864{ 1865 int i; 1866 uint32_t hash; 1867 1868 PMAP_LOCK_INIT(pmap); 1869 LIST_INIT(&pmap->pmap_pvo); 1870 1871 if (pmap_bootstrapped) 1872 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 1873 (vm_offset_t)pmap); 1874 else 1875 pmap->pmap_phys = pmap; 1876 1877 /* 1878 * Allocate some segment registers for this pmap. 1879 */ 1880 hash = moea64_get_unique_vsid(); 1881 1882 for (i = 0; i < 16; i++) 1883 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1884 1885 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1886} 1887#endif 1888 1889/* 1890 * Initialize the pmap associated with process 0. 1891 */ 1892void 1893moea64_pinit0(mmu_t mmu, pmap_t pm) 1894{ 1895 moea64_pinit(mmu, pm); 1896 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1897} 1898 1899/* 1900 * Set the physical protection on the specified range of this map as requested. 1901 */ 1902static void 1903moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) 1904{ 1905 uintptr_t pt; 1906 struct vm_page *pg; 1907 uint64_t oldlo; 1908 1909 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1910 1911 /* 1912 * Grab the PTE pointer before we diddle with the cached PTE 1913 * copy. 1914 */ 1915 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1916 1917 /* 1918 * Change the protection of the page. 1919 */ 1920 oldlo = pvo->pvo_pte.lpte.pte_lo; 1921 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1922 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1923 if ((prot & VM_PROT_EXECUTE) == 0) 1924 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1925 if (prot & VM_PROT_WRITE) 1926 pvo->pvo_pte.lpte.pte_lo |= LPTE_BW; 1927 else 1928 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1929 1930 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1931 1932 /* 1933 * If the PVO is in the page table, update that pte as well. 1934 */ 1935 if (pt != -1) { 1936 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1937 pvo->pvo_vpn); 1938 if (pm != kernel_pmap && pg != NULL && 1939 !(pg->aflags & PGA_EXECUTABLE) && 1940 (pvo->pvo_pte.lpte.pte_lo & 1941 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1942 vm_page_aflag_set(pg, PGA_EXECUTABLE); 1943 moea64_syncicache(mmu, pm, PVO_VADDR(pvo), 1944 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE); 1945 } 1946 } 1947 1948 /* 1949 * Update vm about the REF/CHG bits if the page is managed and we have 1950 * removed write access. 
1951 */ 1952 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 1953 (oldlo & LPTE_PP) != LPTE_BR && (prot & VM_PROT_WRITE) == 0) { 1954 if (pg != NULL) { 1955 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG) 1956 vm_page_dirty(pg); 1957 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF) 1958 vm_page_aflag_set(pg, PGA_REFERENCED); 1959 } 1960 } 1961} 1962 1963void 1964moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1965 vm_prot_t prot) 1966{ 1967 struct pvo_entry *pvo, *tpvo; 1968 1969 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, 1970 sva, eva, prot); 1971 1972 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1973 ("moea64_protect: non current pmap")); 1974 1975 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1976 moea64_remove(mmu, pm, sva, eva); 1977 return; 1978 } 1979 1980 LOCK_TABLE_RD(); 1981 PMAP_LOCK(pm); 1982 if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) { 1983 while (sva < eva) { 1984 #ifdef __powerpc64__ 1985 if (pm != kernel_pmap && 1986 user_va_to_slb_entry(pm, sva) == NULL) { 1987 sva = roundup2(sva + 1, SEGMENT_LENGTH); 1988 continue; 1989 } 1990 #endif 1991 pvo = moea64_pvo_find_va(pm, sva); 1992 if (pvo != NULL) 1993 moea64_pvo_protect(mmu, pm, pvo, prot); 1994 sva += PAGE_SIZE; 1995 } 1996 } else { 1997 LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) { 1998 if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva) 1999 continue; 2000 moea64_pvo_protect(mmu, pm, pvo, prot); 2001 } 2002 } 2003 UNLOCK_TABLE_RD(); 2004 PMAP_UNLOCK(pm); 2005} 2006 2007/* 2008 * Map a list of wired pages into kernel virtual address space. This is 2009 * intended for temporary mappings which do not need page modification or 2010 * references recorded. Existing mappings in the region are overwritten. 2011 */ 2012void 2013moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2014{ 2015 while (count-- > 0) { 2016 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2017 va += PAGE_SIZE; 2018 m++; 2019 } 2020} 2021 2022/* 2023 * Remove page mappings from kernel virtual address space. Intended for 2024 * temporary mappings entered by moea64_qenter.
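 * (the inverse of moea64_qenter() above: each page in the range is
 * unmapped with moea64_kremove())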
2025 */ 2026void 2027moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2028{ 2029 while (count-- > 0) { 2030 moea64_kremove(mmu, va); 2031 va += PAGE_SIZE; 2032 } 2033} 2034 2035void 2036moea64_release_vsid(uint64_t vsid) 2037{ 2038 int idx, mask; 2039 2040 mtx_lock(&moea64_slb_mutex); 2041 idx = vsid & (NVSIDS-1); 2042 mask = 1 << (idx % VSID_NBPW); 2043 idx /= VSID_NBPW; 2044 KASSERT(moea64_vsid_bitmap[idx] & mask, 2045 ("Freeing unallocated VSID %#jx", vsid)); 2046 moea64_vsid_bitmap[idx] &= ~mask; 2047 mtx_unlock(&moea64_slb_mutex); 2048} 2049 2050 2051void 2052moea64_release(mmu_t mmu, pmap_t pmap) 2053{ 2054 2055 /* 2056 * Free segment registers' VSIDs 2057 */ 2058 #ifdef __powerpc64__ 2059 slb_free_tree(pmap); 2060 slb_free_user_cache(pmap->pm_slb); 2061 #else 2062 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2063 2064 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2065 #endif 2066 2067 PMAP_LOCK_DESTROY(pmap); 2068} 2069 2070/* 2071 * Remove all pages mapped by the specified pmap 2072 */ 2073void 2074moea64_remove_pages(mmu_t mmu, pmap_t pm) 2075{ 2076 struct pvo_entry *pvo, *tpvo; 2077 2078 LOCK_TABLE_WR(); 2079 PMAP_LOCK(pm); 2080 LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) { 2081 if (!(pvo->pvo_vaddr & PVO_WIRED)) 2082 moea64_pvo_remove(mmu, pvo); 2083 } 2084 UNLOCK_TABLE_WR(); 2085 PMAP_UNLOCK(pm); 2086} 2087 2088/* 2089 * Remove the given range of addresses from the specified map. 2090 */ 2091void 2092moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2093{ 2094 struct pvo_entry *pvo, *tpvo; 2095 2096 /* 2097 * Perform an unsynchronized read. This is, however, safe. 2098 */ 2099 if (pm->pm_stats.resident_count == 0) 2100 return; 2101 2102 LOCK_TABLE_WR(); 2103 PMAP_LOCK(pm); 2104 if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) { 2105 while (sva < eva) { 2106 #ifdef __powerpc64__ 2107 if (pm != kernel_pmap && 2108 user_va_to_slb_entry(pm, sva) == NULL) { 2109 sva = roundup2(sva + 1, SEGMENT_LENGTH); 2110 continue; 2111 } 2112 #endif 2113 pvo = moea64_pvo_find_va(pm, sva); 2114 if (pvo != NULL) 2115 moea64_pvo_remove(mmu, pvo); 2116 sva += PAGE_SIZE; 2117 } 2118 } else { 2119 LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) { 2120 if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva) 2121 continue; 2122 moea64_pvo_remove(mmu, pvo); 2123 } 2124 } 2125 UNLOCK_TABLE_WR(); 2126 PMAP_UNLOCK(pm); 2127} 2128 2129/* 2130 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2131 * will reflect changes in pte's back to the vm_page. 2132 */ 2133void 2134moea64_remove_all(mmu_t mmu, vm_page_t m) 2135{ 2136 struct pvo_entry *pvo, *next_pvo; 2137 pmap_t pmap; 2138 2139 LOCK_TABLE_WR(); 2140 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { 2141 pmap = pvo->pvo_pmap; 2142 PMAP_LOCK(pmap); 2143 moea64_pvo_remove(mmu, pvo); 2144 PMAP_UNLOCK(pmap); 2145 } 2146 UNLOCK_TABLE_WR(); 2147 if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) 2148 vm_page_dirty(m); 2149 vm_page_aflag_clear(m, PGA_WRITEABLE); 2150 vm_page_aflag_clear(m, PGA_EXECUTABLE); 2151} 2152 2153/* 2154 * Allocate a physical page of memory directly from the phys_avail map. 2155 * Can only be called from moea64_bootstrap before avail start and end are 2156 * calculated. 
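 * (the request is carved straight out of a phys_avail[] entry: the entry
 * is trimmed when the block abuts either end, or split in two when the
 * block lands in the middle)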
2157 */ 2158vm_offset_t 2159moea64_bootstrap_alloc(vm_size_t size, u_int align) 2160{ 2161 vm_offset_t s, e; 2162 int i, j; 2163 2164 size = round_page(size); 2165 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2166 if (align != 0) 2167 s = (phys_avail[i] + align - 1) & ~(align - 1); 2168 else 2169 s = phys_avail[i]; 2170 e = s + size; 2171 2172 if (s < phys_avail[i] || e > phys_avail[i + 1]) 2173 continue; 2174 2175 if (s + size > platform_real_maxaddr()) 2176 continue; 2177 2178 if (s == phys_avail[i]) { 2179 phys_avail[i] += size; 2180 } else if (e == phys_avail[i + 1]) { 2181 phys_avail[i + 1] -= size; 2182 } else { 2183 for (j = phys_avail_count * 2; j > i; j -= 2) { 2184 phys_avail[j] = phys_avail[j - 2]; 2185 phys_avail[j + 1] = phys_avail[j - 1]; 2186 } 2187 2188 phys_avail[i + 3] = phys_avail[i + 1]; 2189 phys_avail[i + 1] = s; 2190 phys_avail[i + 2] = e; 2191 phys_avail_count++; 2192 } 2193 2194 return (s); 2195 } 2196 panic("moea64_bootstrap_alloc: could not allocate memory"); 2197} 2198 2199static int 2200moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone, 2201 struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, 2202 uint64_t pte_lo, int flags) 2203{ 2204 struct pvo_entry *pvo; 2205 uint64_t vsid; 2206 int first; 2207 u_int ptegidx; 2208 int i; 2209 int bootstrap; 2210 2211 /* 2212 * One nasty thing that can happen here is that the UMA calls to 2213 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2214 * which calls UMA... 2215 * 2216 * We break the loop by detecting recursion and allocating out of 2217 * the bootstrap pool. 2218 */ 2219 2220 first = 0; 2221 bootstrap = (flags & PVO_BOOTSTRAP); 2222 2223 if (!moea64_initialized) 2224 bootstrap = 1; 2225 2226 PMAP_LOCK_ASSERT(pm, MA_OWNED); 2227 rw_assert(&moea64_table_lock, RA_WLOCKED); 2228 2229 /* 2230 * Compute the PTE Group index. 2231 */ 2232 va &= ~ADDR_POFF; 2233 vsid = va_to_vsid(pm, va); 2234 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2235 2236 /* 2237 * Remove any existing mapping for this page. Reuse the pvo entry if 2238 * there is a mapping. 2239 */ 2240 moea64_pvo_enter_calls++; 2241 2242 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2243 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2244 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2245 (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP)) 2246 == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) { 2247 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) { 2248 /* Re-insert if spilled */ 2249 i = MOEA64_PTE_INSERT(mmu, ptegidx, 2250 &pvo->pvo_pte.lpte); 2251 if (i >= 0) 2252 PVO_PTEGIDX_SET(pvo, i); 2253 moea64_pte_overflow--; 2254 } 2255 return (0); 2256 } 2257 moea64_pvo_remove(mmu, pvo); 2258 break; 2259 } 2260 } 2261 2262 /* 2263 * If we aren't overwriting a mapping, try to allocate. 2264 */ 2265 if (bootstrap) { 2266 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2267 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 2268 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2269 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2270 } 2271 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2272 moea64_bpvo_pool_index++; 2273 bootstrap = 1; 2274 } else { 2275 /* 2276 * Note: drop the table lock around the UMA allocation in 2277 * case the UMA allocator needs to manipulate the page 2278 * table. The mapping we are working with is already 2279 * protected by the PMAP lock. 
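 * (note: in the code below the table lock is not actually dropped across
 * uma_zalloc(); the allocation uses M_NOWAIT so it cannot sleep, and a
 * NULL return is handled by the ENOMEM path that follows)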
2280 */ 2281 pvo = uma_zalloc(zone, M_NOWAIT); 2282 } 2283 2284 if (pvo == NULL) 2285 return (ENOMEM); 2286 2287 moea64_pvo_entries++; 2288 pvo->pvo_vaddr = va; 2289 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2290 | (vsid << 16); 2291 pvo->pvo_pmap = pm; 2292 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2293 pvo->pvo_vaddr &= ~ADDR_POFF; 2294 2295 if (flags & PVO_WIRED) 2296 pvo->pvo_vaddr |= PVO_WIRED; 2297 if (pvo_head != &moea64_pvo_kunmanaged) 2298 pvo->pvo_vaddr |= PVO_MANAGED; 2299 if (bootstrap) 2300 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2301 if (flags & PVO_LARGE) 2302 pvo->pvo_vaddr |= PVO_LARGE; 2303 2304 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2305 (uint64_t)(pa) | pte_lo, flags); 2306 2307 /* 2308 * Add to pmap list 2309 */ 2310 LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink); 2311 2312 /* 2313 * Remember if the list was empty and therefore will be the first 2314 * item. 2315 */ 2316 if (LIST_FIRST(pvo_head) == NULL) 2317 first = 1; 2318 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2319 2320 if (pvo->pvo_vaddr & PVO_WIRED) { 2321 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2322 pm->pm_stats.wired_count++; 2323 } 2324 pm->pm_stats.resident_count++; 2325 2326 /* 2327 * We hope this succeeds but it isn't required. 2328 */ 2329 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 2330 if (i >= 0) { 2331 PVO_PTEGIDX_SET(pvo, i); 2332 } else { 2333 panic("moea64_pvo_enter: overflow"); 2334 moea64_pte_overflow++; 2335 } 2336 2337 if (pm == kernel_pmap) 2338 isync(); 2339 2340#ifdef __powerpc64__ 2341 /* 2342 * Make sure all our bootstrap mappings are in the SLB as soon 2343 * as virtual memory is switched on. 2344 */ 2345 if (!pmap_bootstrapped) 2346 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2347#endif 2348 2349 return (first ? ENOENT : 0); 2350} 2351 2352static void 2353moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo) 2354{ 2355 struct vm_page *pg; 2356 uintptr_t pt; 2357 2358 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); 2359 rw_assert(&moea64_table_lock, RA_WLOCKED); 2360 2361 /* 2362 * If there is an active pte entry, we need to deactivate it (and 2363 * save the ref & cfg bits). 2364 */ 2365 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2366 if (pt != -1) { 2367 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2368 PVO_PTEGIDX_CLR(pvo); 2369 } else { 2370 moea64_pte_overflow--; 2371 } 2372 2373 /* 2374 * Update our statistics. 2375 */ 2376 pvo->pvo_pmap->pm_stats.resident_count--; 2377 if (pvo->pvo_vaddr & PVO_WIRED) 2378 pvo->pvo_pmap->pm_stats.wired_count--; 2379 2380 /* 2381 * Remove this PVO from the PV and pmap lists. 2382 */ 2383 LIST_REMOVE(pvo, pvo_vlink); 2384 LIST_REMOVE(pvo, pvo_plink); 2385 2386 /* 2387 * Remove this from the overflow list and return it to the pool 2388 * if we aren't going to reuse it. 2389 */ 2390 LIST_REMOVE(pvo, pvo_olink); 2391 2392 /* 2393 * Update vm about the REF/CHG bits if the page is managed. 
2394 */ 2395 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2396 2397 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 2398 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 2399 if (pg != NULL) { 2400 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG) 2401 vm_page_dirty(pg); 2402 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF) 2403 vm_page_aflag_set(pg, PGA_REFERENCED); 2404 if (LIST_EMPTY(vm_page_to_pvoh(pg))) 2405 vm_page_aflag_clear(pg, PGA_WRITEABLE); 2406 } 2407 } 2408 2409 if (pg != NULL && LIST_EMPTY(vm_page_to_pvoh(pg))) 2410 vm_page_aflag_clear(pg, PGA_EXECUTABLE); 2411 2412 moea64_pvo_entries--; 2413 moea64_pvo_remove_calls++; 2414 2415 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2416 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone : 2417 moea64_upvo_zone, pvo); 2418} 2419 2420static struct pvo_entry * 2421moea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2422{ 2423 struct pvo_entry *pvo; 2424 int ptegidx; 2425 uint64_t vsid; 2426 #ifdef __powerpc64__ 2427 uint64_t slbv; 2428 2429 if (pm == kernel_pmap) { 2430 slbv = kernel_va_to_slbv(va); 2431 } else { 2432 struct slb *slb; 2433 slb = user_va_to_slb_entry(pm, va); 2434 /* The page is not mapped if the segment isn't */ 2435 if (slb == NULL) 2436 return NULL; 2437 slbv = slb->slbv; 2438 } 2439 2440 vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT; 2441 if (slbv & SLBV_L) 2442 va &= ~moea64_large_page_mask; 2443 else 2444 va &= ~ADDR_POFF; 2445 ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L); 2446 #else 2447 va &= ~ADDR_POFF; 2448 vsid = va_to_vsid(pm, va); 2449 ptegidx = va_to_pteg(vsid, va, 0); 2450 #endif 2451 2452 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2453 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) 2454 break; 2455 } 2456 2457 return (pvo); 2458} 2459 2460static boolean_t 2461moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2462{ 2463 struct pvo_entry *pvo; 2464 uintptr_t pt; 2465 2466 LOCK_TABLE_RD(); 2467 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2468 /* 2469 * See if we saved the bit off. If so, return success. 2470 */ 2471 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2472 UNLOCK_TABLE_RD(); 2473 return (TRUE); 2474 } 2475 } 2476 2477 /* 2478 * No luck, now go through the hard part of looking at the PTEs 2479 * themselves. Sync so that any pending REF/CHG bits are flushed to 2480 * the PTEs. 2481 */ 2482 powerpc_sync(); 2483 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2484 2485 /* 2486 * See if this pvo has a valid PTE. if so, fetch the 2487 * REF/CHG bits from the valid PTE. If the appropriate 2488 * ptebit is set, return success. 2489 */ 2490 PMAP_LOCK(pvo->pvo_pmap); 2491 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2492 if (pt != -1) { 2493 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2494 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2495 PMAP_UNLOCK(pvo->pvo_pmap); 2496 UNLOCK_TABLE_RD(); 2497 return (TRUE); 2498 } 2499 } 2500 PMAP_UNLOCK(pvo->pvo_pmap); 2501 } 2502 2503 UNLOCK_TABLE_RD(); 2504 return (FALSE); 2505} 2506 2507static u_int 2508moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2509{ 2510 u_int count; 2511 struct pvo_entry *pvo; 2512 uintptr_t pt; 2513 2514 /* 2515 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2516 * we can reset the right ones). note that since the pvo entries and 2517 * list heads are accessed via BAT0 and are never placed in the page 2518 * table, we don't have to worry about further accesses setting the 2519 * REF/CHG bits. 2520 */ 2521 powerpc_sync(); 2522 2523 /* 2524 * For each pvo entry, clear the pvo's ptebit. 
If this pvo has a 2525 * valid pte clear the ptebit from the valid pte. 2526 */ 2527 count = 0; 2528 LOCK_TABLE_RD(); 2529 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2530 PMAP_LOCK(pvo->pvo_pmap); 2531 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2532 if (pt != -1) { 2533 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2534 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2535 count++; 2536 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte, 2537 pvo->pvo_vpn, ptebit); 2538 } 2539 } 2540 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2541 PMAP_UNLOCK(pvo->pvo_pmap); 2542 } 2543 2544 UNLOCK_TABLE_RD(); 2545 return (count); 2546} 2547 2548boolean_t 2549moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2550{ 2551 struct pvo_entry *pvo; 2552 vm_offset_t ppa; 2553 int error = 0; 2554 2555 LOCK_TABLE_RD(); 2556 PMAP_LOCK(kernel_pmap); 2557 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2558 pvo = moea64_pvo_find_va(kernel_pmap, ppa); 2559 if (pvo == NULL || 2560 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2561 error = EFAULT; 2562 break; 2563 } 2564 } 2565 UNLOCK_TABLE_RD(); 2566 PMAP_UNLOCK(kernel_pmap); 2567 2568 return (error); 2569} 2570 2571/* 2572 * Map a set of physical memory pages into the kernel virtual 2573 * address space. Return a pointer to where it is mapped. This 2574 * routine is intended to be used for mapping device memory, 2575 * NOT real memory. 2576 */ 2577void * 2578moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2579{ 2580 vm_offset_t va, tmpva, ppa, offset; 2581 2582 ppa = trunc_page(pa); 2583 offset = pa & PAGE_MASK; 2584 size = roundup2(offset + size, PAGE_SIZE); 2585 2586 va = kmem_alloc_nofault(kernel_map, size); 2587 2588 if (!va) 2589 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2590 2591 for (tmpva = va; size > 0;) { 2592 moea64_kenter_attr(mmu, tmpva, ppa, ma); 2593 size -= PAGE_SIZE; 2594 tmpva += PAGE_SIZE; 2595 ppa += PAGE_SIZE; 2596 } 2597 2598 return ((void *)(va + offset)); 2599} 2600 2601void * 2602moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2603{ 2604 2605 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); 2606} 2607 2608void 2609moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2610{ 2611 vm_offset_t base, offset; 2612 2613 base = trunc_page(va); 2614 offset = va & PAGE_MASK; 2615 size = roundup2(offset + size, PAGE_SIZE); 2616 2617 kmem_free(kernel_map, base, size); 2618} 2619 2620void 2621moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2622{ 2623 struct pvo_entry *pvo; 2624 vm_offset_t lim; 2625 vm_paddr_t pa; 2626 vm_size_t len; 2627 2628 LOCK_TABLE_RD(); 2629 PMAP_LOCK(pm); 2630 while (sz > 0) { 2631 lim = round_page(va); 2632 len = MIN(lim - va, sz); 2633 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 2634 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) { 2635 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 2636 (va & ADDR_POFF); 2637 moea64_syncicache(mmu, pm, va, pa, len); 2638 } 2639 va += len; 2640 sz -= len; 2641 } 2642 UNLOCK_TABLE_RD(); 2643 PMAP_UNLOCK(pm); 2644} 2645
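/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * mapdev/unmapdev pair above is normally reached through the machine-
 * independent pmap interface, e.g. to map a device BAR, access it and
 * tear the mapping down again:
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(bar_pa, bar_len);
 *	... register accesses through bus_space or volatile pointers ...
 *	pmap_unmapdev((vm_offset_t)regs, bar_len);
 *
 * On AIM machines these calls resolve to moea64_mapdev() and
 * moea64_unmapdev() via the mmu_if glue; bar_pa and bar_len stand in for
 * a real device's physical base address and size.
 */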