mmu_oea64.c revision 222813
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 222813 2011-06-07 08:46:13Z attilio $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_kstack_pages.h" 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/queue.h> 122#include <sys/cpuset.h> 123#include <sys/ktr.h> 124#include <sys/lock.h> 125#include <sys/msgbuf.h> 126#include <sys/mutex.h> 127#include <sys/proc.h> 128#include <sys/sched.h> 129#include <sys/sysctl.h> 130#include <sys/systm.h> 131#include <sys/vmmeter.h> 132 133#include <sys/kdb.h> 134 135#include <dev/ofw/openfirm.h> 136 137#include <vm/vm.h> 138#include <vm/vm_param.h> 139#include <vm/vm_kern.h> 140#include <vm/vm_page.h> 141#include <vm/vm_map.h> 142#include <vm/vm_object.h> 143#include <vm/vm_extern.h> 144#include <vm/vm_pageout.h> 145#include <vm/vm_pager.h> 146#include <vm/uma.h> 147 148#include <machine/_inttypes.h> 149#include <machine/cpu.h> 150#include <machine/platform.h> 151#include <machine/frame.h> 152#include <machine/md_var.h> 153#include <machine/psl.h> 154#include <machine/bat.h> 155#include <machine/hid.h> 156#include <machine/pte.h> 157#include <machine/sr.h> 158#include <machine/trap.h> 159#include <machine/mmuvar.h> 160 161#include "mmu_oea64.h" 162#include "mmu_if.h" 163#include "moea64_if.h" 164 165void moea64_release_vsid(uint64_t vsid); 166uintptr_t moea64_get_unique_vsid(void); 167 168#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 169#define ENABLE_TRANS(msr) mtmsr(msr) 170 171#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 172#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 173#define VSID_HASH_MASK 0x0000007fffffffffULL 174 175#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 176#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 177#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 178 179struct ofw_map { 180 cell_t om_va; 181 cell_t om_len; 182 cell_t om_pa_hi; 183 cell_t om_pa_lo; 184 cell_t om_mode; 185}; 186 187/* 188 * Map of physical memory regions. 189 */ 190static struct mem_region *regions; 191static struct mem_region *pregions; 192static u_int phys_avail_count; 193static int regions_sz, pregions_sz; 194 195extern void bs_remap_earlyboot(void); 196 197/* 198 * Lock for the pteg and pvo tables. 199 */ 200struct mtx moea64_table_mutex; 201struct mtx moea64_slb_mutex; 202 203/* 204 * PTEG data. 205 */ 206u_int moea64_pteg_count; 207u_int moea64_pteg_mask; 208 209/* 210 * PVO data. 211 */ 212struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 213struct pvo_head moea64_pvo_kunmanaged = /* list of unmanaged pages */ 214 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 215 216uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 217uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 218 219#define BPVO_POOL_SIZE 327680 220static struct pvo_entry *moea64_bpvo_pool; 221static int moea64_bpvo_pool_index = 0; 222 223#define VSID_NBPW (sizeof(u_int32_t) * 8) 224#ifdef __powerpc64__ 225#define NVSIDS (NPMAPS * 16) 226#define VSID_HASHMASK 0xffffffffUL 227#else 228#define NVSIDS NPMAPS 229#define VSID_HASHMASK 0xfffffUL 230#endif 231static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 232 233static boolean_t moea64_initialized = FALSE; 234 235/* 236 * Statistics. 
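 *
 * The counters below track PTE and PVO activity (valid PTEs, PTEG
 * overflows, PVO enter/remove calls).  They are exported read-only
 * under the machdep sysctl tree by the SYSCTL_INT() declarations that
 * follow, so they can be inspected from userland, for example:
 *
 *	sysctl machdep.moea64_pte_valid machdep.moea64_pte_overflow
 *
 * (Illustrative usage only; the values depend on the running system.)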
237 */ 238u_int moea64_pte_valid = 0; 239u_int moea64_pte_overflow = 0; 240u_int moea64_pvo_entries = 0; 241u_int moea64_pvo_enter_calls = 0; 242u_int moea64_pvo_remove_calls = 0; 243SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 244 &moea64_pte_valid, 0, ""); 245SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 246 &moea64_pte_overflow, 0, ""); 247SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 248 &moea64_pvo_entries, 0, ""); 249SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 250 &moea64_pvo_enter_calls, 0, ""); 251SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 252 &moea64_pvo_remove_calls, 0, ""); 253 254vm_offset_t moea64_scratchpage_va[2]; 255struct pvo_entry *moea64_scratchpage_pvo[2]; 256uintptr_t moea64_scratchpage_pte[2]; 257struct mtx moea64_scratchpage_mtx; 258 259uint64_t moea64_large_page_mask = 0; 260int moea64_large_page_size = 0; 261int moea64_large_page_shift = 0; 262 263/* 264 * PVO calls. 265 */ 266static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *, 267 vm_offset_t, vm_offset_t, uint64_t, int); 268static void moea64_pvo_remove(mmu_t, struct pvo_entry *); 269static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 270 271/* 272 * Utility routines. 273 */ 274static void moea64_enter_locked(mmu_t, pmap_t, vm_offset_t, 275 vm_page_t, vm_prot_t, boolean_t); 276static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t); 277static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t); 278static void moea64_kremove(mmu_t, vm_offset_t); 279static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 280 vm_offset_t pa, vm_size_t sz); 281 282/* 283 * Kernel MMU interface 284 */ 285void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 286void moea64_clear_modify(mmu_t, vm_page_t); 287void moea64_clear_reference(mmu_t, vm_page_t); 288void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 289void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 290void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 291 vm_prot_t); 292void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 293vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 294vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 295void moea64_init(mmu_t); 296boolean_t moea64_is_modified(mmu_t, vm_page_t); 297boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 298boolean_t moea64_is_referenced(mmu_t, vm_page_t); 299boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 300vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 301boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 302int moea64_page_wired_mappings(mmu_t, vm_page_t); 303void moea64_pinit(mmu_t, pmap_t); 304void moea64_pinit0(mmu_t, pmap_t); 305void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 306void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 307void moea64_qremove(mmu_t, vm_offset_t, int); 308void moea64_release(mmu_t, pmap_t); 309void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 310void moea64_remove_all(mmu_t, vm_page_t); 311void moea64_remove_write(mmu_t, vm_page_t); 312void moea64_zero_page(mmu_t, vm_page_t); 313void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 314void moea64_zero_page_idle(mmu_t, vm_page_t); 315void moea64_activate(mmu_t, struct thread *); 316void moea64_deactivate(mmu_t, struct thread *); 317void *moea64_mapdev(mmu_t, vm_offset_t, 
vm_size_t); 318void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 319void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 320vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 321void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); 322void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma); 323void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 324boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 325static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 326 327static mmu_method_t moea64_methods[] = { 328 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 329 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 330 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 331 MMUMETHOD(mmu_copy_page, moea64_copy_page), 332 MMUMETHOD(mmu_enter, moea64_enter), 333 MMUMETHOD(mmu_enter_object, moea64_enter_object), 334 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 335 MMUMETHOD(mmu_extract, moea64_extract), 336 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 337 MMUMETHOD(mmu_init, moea64_init), 338 MMUMETHOD(mmu_is_modified, moea64_is_modified), 339 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), 340 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 341 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 342 MMUMETHOD(mmu_map, moea64_map), 343 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 344 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 345 MMUMETHOD(mmu_pinit, moea64_pinit), 346 MMUMETHOD(mmu_pinit0, moea64_pinit0), 347 MMUMETHOD(mmu_protect, moea64_protect), 348 MMUMETHOD(mmu_qenter, moea64_qenter), 349 MMUMETHOD(mmu_qremove, moea64_qremove), 350 MMUMETHOD(mmu_release, moea64_release), 351 MMUMETHOD(mmu_remove, moea64_remove), 352 MMUMETHOD(mmu_remove_all, moea64_remove_all), 353 MMUMETHOD(mmu_remove_write, moea64_remove_write), 354 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 355 MMUMETHOD(mmu_zero_page, moea64_zero_page), 356 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 357 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 358 MMUMETHOD(mmu_activate, moea64_activate), 359 MMUMETHOD(mmu_deactivate, moea64_deactivate), 360 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), 361 362 /* Internal interfaces */ 363 MMUMETHOD(mmu_mapdev, moea64_mapdev), 364 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), 365 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 366 MMUMETHOD(mmu_kextract, moea64_kextract), 367 MMUMETHOD(mmu_kenter, moea64_kenter), 368 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), 369 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 370 371 { 0, 0 } 372}; 373 374MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); 375 376static __inline u_int 377va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 378{ 379 uint64_t hash; 380 int shift; 381 382 shift = large ? 
moea64_large_page_shift : ADDR_PIDX_SHFT; 383 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 384 shift); 385 return (hash & moea64_pteg_mask); 386} 387 388static __inline struct pvo_head * 389vm_page_to_pvoh(vm_page_t m) 390{ 391 392 return (&m->md.mdpg_pvoh); 393} 394 395static __inline void 396moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 397{ 398 399 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 400 m->md.mdpg_attrs &= ~ptebit; 401} 402 403static __inline u_int64_t 404moea64_attr_fetch(vm_page_t m) 405{ 406 407 return (m->md.mdpg_attrs); 408} 409 410static __inline void 411moea64_attr_save(vm_page_t m, u_int64_t ptebit) 412{ 413 414 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 415 m->md.mdpg_attrs |= ptebit; 416} 417 418static __inline void 419moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 420 uint64_t pte_lo, int flags) 421{ 422 423 ASSERT_TABLE_LOCK(); 424 425 /* 426 * Construct a PTE. Default to IMB initially. Valid bit only gets 427 * set when the real pte is set in memory. 428 * 429 * Note: Don't set the valid bit for correct operation of tlb update. 430 */ 431 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 432 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 433 434 if (flags & PVO_LARGE) 435 pt->pte_hi |= LPTE_BIG; 436 437 pt->pte_lo = pte_lo; 438} 439 440static __inline uint64_t 441moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 442{ 443 uint64_t pte_lo; 444 int i; 445 446 if (ma != VM_MEMATTR_DEFAULT) { 447 switch (ma) { 448 case VM_MEMATTR_UNCACHEABLE: 449 return (LPTE_I | LPTE_G); 450 case VM_MEMATTR_WRITE_COMBINING: 451 case VM_MEMATTR_WRITE_BACK: 452 case VM_MEMATTR_PREFETCHABLE: 453 return (LPTE_I); 454 case VM_MEMATTR_WRITE_THROUGH: 455 return (LPTE_W | LPTE_M); 456 } 457 } 458 459 /* 460 * Assume the page is cache inhibited and access is guarded unless 461 * it's in our available memory array. 462 */ 463 pte_lo = LPTE_I | LPTE_G; 464 for (i = 0; i < pregions_sz; i++) { 465 if ((pa >= pregions[i].mr_start) && 466 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 467 pte_lo &= ~(LPTE_I | LPTE_G); 468 pte_lo |= LPTE_M; 469 break; 470 } 471 } 472 473 return pte_lo; 474} 475 476/* 477 * Quick sort callout for comparing memory regions. 
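 *
 * om_cmp() orders struct ofw_map entries by physical address, treating
 * om_pa_hi and om_pa_lo as the high and low 32-bit halves of a single
 * 64-bit address.  A minimal user-space sketch of the same ordering
 * (om_cmp_sketch() is a hypothetical illustration, not part of this
 * file):
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static int
om_cmp_sketch(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo)
{
	uint64_t a = ((uint64_t)a_hi << 32) | a_lo;
	uint64_t b = ((uint64_t)b_hi << 32) | b_lo;

	return (a < b ? -1 : (a > b ? 1 : 0));
}
#endif
/*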
478 */ 479static int om_cmp(const void *a, const void *b); 480 481static int 482om_cmp(const void *a, const void *b) 483{ 484 const struct ofw_map *mapa; 485 const struct ofw_map *mapb; 486 487 mapa = a; 488 mapb = b; 489 if (mapa->om_pa_hi < mapb->om_pa_hi) 490 return (-1); 491 else if (mapa->om_pa_hi > mapb->om_pa_hi) 492 return (1); 493 else if (mapa->om_pa_lo < mapb->om_pa_lo) 494 return (-1); 495 else if (mapa->om_pa_lo > mapb->om_pa_lo) 496 return (1); 497 else 498 return (0); 499} 500 501static void 502moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 503{ 504 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 505 register_t msr; 506 vm_offset_t off; 507 vm_paddr_t pa_base; 508 int i; 509 510 bzero(translations, sz); 511 if (OF_getprop(mmu, "translations", translations, sz) == -1) 512 panic("moea64_bootstrap: can't get ofw translations"); 513 514 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 515 sz /= sizeof(*translations); 516 qsort(translations, sz, sizeof (*translations), om_cmp); 517 518 for (i = 0; i < sz; i++) { 519 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 520 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 521 translations[i].om_len); 522 523 if (translations[i].om_pa_lo % PAGE_SIZE) 524 panic("OFW translation not page-aligned!"); 525 526 pa_base = translations[i].om_pa_lo; 527 528 #ifdef __powerpc64__ 529 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32; 530 #else 531 if (translations[i].om_pa_hi) 532 panic("OFW translations above 32-bit boundary!"); 533 #endif 534 535 /* Now enter the pages for this mapping */ 536 537 DISABLE_TRANS(msr); 538 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 539 if (moea64_pvo_find_va(kernel_pmap, 540 translations[i].om_va + off) != NULL) 541 continue; 542 543 moea64_kenter(mmup, translations[i].om_va + off, 544 pa_base + off); 545 } 546 ENABLE_TRANS(msr); 547 } 548} 549 550#ifdef __powerpc64__ 551static void 552moea64_probe_large_page(void) 553{ 554 uint16_t pvr = mfpvr() >> 16; 555 556 switch (pvr) { 557 case IBM970: 558 case IBM970FX: 559 case IBM970MP: 560 powerpc_sync(); isync(); 561 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 562 powerpc_sync(); isync(); 563 564 /* FALLTHROUGH */ 565 case IBMCELLBE: 566 moea64_large_page_size = 0x1000000; /* 16 MB */ 567 moea64_large_page_shift = 24; 568 break; 569 default: 570 moea64_large_page_size = 0; 571 } 572 573 moea64_large_page_mask = moea64_large_page_size - 1; 574} 575 576static void 577moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 578{ 579 struct slb *cache; 580 struct slb entry; 581 uint64_t esid, slbe; 582 uint64_t i; 583 584 cache = PCPU_GET(slb); 585 esid = va >> ADDR_SR_SHFT; 586 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 587 588 for (i = 0; i < 64; i++) { 589 if (cache[i].slbe == (slbe | i)) 590 return; 591 } 592 593 entry.slbe = slbe; 594 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT; 595 if (large) 596 entry.slbv |= SLBV_L; 597 598 slb_insert_kernel(entry.slbe, entry.slbv); 599} 600#endif 601 602static void 603moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, 604 vm_offset_t kernelend) 605{ 606 register_t msr; 607 vm_paddr_t pa; 608 vm_offset_t size, off; 609 uint64_t pte_lo; 610 int i; 611 612 if (moea64_large_page_size == 0) 613 hw_direct_map = 0; 614 615 DISABLE_TRANS(msr); 616 if (hw_direct_map) { 617 PMAP_LOCK(kernel_pmap); 618 for (i = 0; i < pregions_sz; i++) { 619 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + 620 pregions[i].mr_size; pa += 
moea64_large_page_size) { 621 pte_lo = LPTE_M; 622 623 /* 624 * Set memory access as guarded if prefetch within 625 * the page could exit the available physmem area. 626 */ 627 if (pa & moea64_large_page_mask) { 628 pa &= moea64_large_page_mask; 629 pte_lo |= LPTE_G; 630 } 631 if (pa + moea64_large_page_size > 632 pregions[i].mr_start + pregions[i].mr_size) 633 pte_lo |= LPTE_G; 634 635 moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone, 636 &moea64_pvo_kunmanaged, pa, pa, 637 pte_lo, PVO_WIRED | PVO_LARGE); 638 } 639 } 640 PMAP_UNLOCK(kernel_pmap); 641 } else { 642 size = sizeof(struct pvo_head) * moea64_pteg_count; 643 off = (vm_offset_t)(moea64_pvo_table); 644 for (pa = off; pa < off + size; pa += PAGE_SIZE) 645 moea64_kenter(mmup, pa, pa); 646 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 647 off = (vm_offset_t)(moea64_bpvo_pool); 648 for (pa = off; pa < off + size; pa += PAGE_SIZE) 649 moea64_kenter(mmup, pa, pa); 650 651 /* 652 * Map certain important things, like ourselves. 653 * 654 * NOTE: We do not map the exception vector space. That code is 655 * used only in real mode, and leaving it unmapped allows us to 656 * catch NULL pointer deferences, instead of making NULL a valid 657 * address. 658 */ 659 660 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; 661 pa += PAGE_SIZE) 662 moea64_kenter(mmup, pa, pa); 663 } 664 ENABLE_TRANS(msr); 665} 666 667void 668moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 669{ 670 int i, j; 671 vm_size_t physsz, hwphyssz; 672 673#ifndef __powerpc64__ 674 /* We don't have a direct map since there is no BAT */ 675 hw_direct_map = 0; 676 677 /* Make sure battable is zero, since we have no BAT */ 678 for (i = 0; i < 16; i++) { 679 battable[i].batu = 0; 680 battable[i].batl = 0; 681 } 682#else 683 moea64_probe_large_page(); 684 685 /* Use a direct map if we have large page support */ 686 if (moea64_large_page_size > 0) 687 hw_direct_map = 1; 688 else 689 hw_direct_map = 0; 690#endif 691 692 /* Get physical memory regions from firmware */ 693 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 694 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 695 696 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 697 panic("moea64_bootstrap: phys_avail too small"); 698 699 phys_avail_count = 0; 700 physsz = 0; 701 hwphyssz = 0; 702 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 703 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 704 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 705 regions[i].mr_start + regions[i].mr_size, 706 regions[i].mr_size); 707 if (hwphyssz != 0 && 708 (physsz + regions[i].mr_size) >= hwphyssz) { 709 if (physsz < hwphyssz) { 710 phys_avail[j] = regions[i].mr_start; 711 phys_avail[j + 1] = regions[i].mr_start + 712 hwphyssz - physsz; 713 physsz = hwphyssz; 714 phys_avail_count++; 715 } 716 break; 717 } 718 phys_avail[j] = regions[i].mr_start; 719 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 720 phys_avail_count++; 721 physsz += regions[i].mr_size; 722 } 723 724 /* Check for overlap with the kernel and exception vectors */ 725 for (j = 0; j < 2*phys_avail_count; j+=2) { 726 if (phys_avail[j] < EXC_LAST) 727 phys_avail[j] += EXC_LAST; 728 729 if (kernelstart >= phys_avail[j] && 730 kernelstart < phys_avail[j+1]) { 731 if (kernelend < phys_avail[j+1]) { 732 phys_avail[2*phys_avail_count] = 733 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 734 phys_avail[2*phys_avail_count + 1] = 735 phys_avail[j+1]; 736 phys_avail_count++; 737 } 738 739 phys_avail[j+1] = 
kernelstart & ~PAGE_MASK; 740 } 741 742 if (kernelend >= phys_avail[j] && 743 kernelend < phys_avail[j+1]) { 744 if (kernelstart > phys_avail[j]) { 745 phys_avail[2*phys_avail_count] = phys_avail[j]; 746 phys_avail[2*phys_avail_count + 1] = 747 kernelstart & ~PAGE_MASK; 748 phys_avail_count++; 749 } 750 751 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 752 } 753 } 754 755 physmem = btoc(physsz); 756 757#ifdef PTEGCOUNT 758 moea64_pteg_count = PTEGCOUNT; 759#else 760 moea64_pteg_count = 0x1000; 761 762 while (moea64_pteg_count < physmem) 763 moea64_pteg_count <<= 1; 764 765 moea64_pteg_count >>= 1; 766#endif /* PTEGCOUNT */ 767} 768 769void 770moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 771{ 772 vm_size_t size; 773 register_t msr; 774 int i; 775 776 /* 777 * Set PTEG mask 778 */ 779 moea64_pteg_mask = moea64_pteg_count - 1; 780 781 /* 782 * Allocate pv/overflow lists. 783 */ 784 size = sizeof(struct pvo_head) * moea64_pteg_count; 785 786 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 787 PAGE_SIZE); 788 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 789 790 DISABLE_TRANS(msr); 791 for (i = 0; i < moea64_pteg_count; i++) 792 LIST_INIT(&moea64_pvo_table[i]); 793 ENABLE_TRANS(msr); 794 795 /* 796 * Initialize the lock that synchronizes access to the pteg and pvo 797 * tables. 798 */ 799 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 800 MTX_RECURSE); 801 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); 802 803 /* 804 * Initialise the unmanaged pvo pool. 805 */ 806 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 807 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 808 moea64_bpvo_pool_index = 0; 809 810 /* 811 * Make sure kernel vsid is allocated as well as VSID 0. 812 */ 813 #ifndef __powerpc64__ 814 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 815 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 816 moea64_vsid_bitmap[0] |= 1; 817 #endif 818 819 /* 820 * Initialize the kernel pmap (which is statically allocated). 821 */ 822 #ifdef __powerpc64__ 823 for (i = 0; i < 64; i++) { 824 pcpup->pc_slb[i].slbv = 0; 825 pcpup->pc_slb[i].slbe = 0; 826 } 827 #else 828 for (i = 0; i < 16; i++) 829 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 830 #endif 831 832 kernel_pmap->pmap_phys = kernel_pmap; 833 CPU_FILL(&kernel_pmap->pm_active); 834 835 PMAP_LOCK_INIT(kernel_pmap); 836 837 /* 838 * Now map in all the other buffers we allocated earlier 839 */ 840 841 moea64_setup_direct_map(mmup, kernelstart, kernelend); 842} 843 844void 845moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 846{ 847 ihandle_t mmui; 848 phandle_t chosen; 849 phandle_t mmu; 850 size_t sz; 851 int i; 852 vm_offset_t pa, va; 853 void *dpcpu; 854 855 /* 856 * Set up the Open Firmware pmap and add its mappings if not in real 857 * mode. 858 */ 859 860 chosen = OF_finddevice("/chosen"); 861 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) { 862 mmu = OF_instance_to_package(mmui); 863 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1) 864 sz = 0; 865 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 866 panic("moea64_bootstrap: too many ofw translations"); 867 868 if (sz > 0) 869 moea64_add_ofw_mappings(mmup, mmu, sz); 870 } 871 872 /* 873 * Calculate the last available physical address. 
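 *
 * phys_avail[] holds (start, end) address pairs and is terminated by a
 * zero entry, e.g. { s0, e0, s1, e1, 0, 0 }.  The loop below skips to
 * the final pair, and Maxmem is set from its end address (converted to
 * pages by powerpc_btop()).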
874 */ 875 for (i = 0; phys_avail[i + 2] != 0; i += 2) 876 ; 877 Maxmem = powerpc_btop(phys_avail[i + 1]); 878 879 /* 880 * Initialize MMU and remap early physical mappings 881 */ 882 MMU_CPU_BOOTSTRAP(mmup,0); 883 mtmsr(mfmsr() | PSL_DR | PSL_IR); 884 pmap_bootstrapped++; 885 bs_remap_earlyboot(); 886 887 /* 888 * Set the start and end of kva. 889 */ 890 virtual_avail = VM_MIN_KERNEL_ADDRESS; 891 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 892 893 /* 894 * Map the entire KVA range into the SLB. We must not fault there. 895 */ 896 #ifdef __powerpc64__ 897 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 898 moea64_bootstrap_slb_prefault(va, 0); 899 #endif 900 901 /* 902 * Figure out how far we can extend virtual_end into segment 16 903 * without running into existing mappings. Segment 16 is guaranteed 904 * to contain neither RAM nor devices (at least on Apple hardware), 905 * but will generally contain some OFW mappings we should not 906 * step on. 907 */ 908 909 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 910 PMAP_LOCK(kernel_pmap); 911 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 912 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 913 virtual_end += PAGE_SIZE; 914 PMAP_UNLOCK(kernel_pmap); 915 #endif 916 917 /* 918 * Allocate a kernel stack with a guard page for thread0 and map it 919 * into the kernel page map. 920 */ 921 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 922 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 923 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 924 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 925 thread0.td_kstack = va; 926 thread0.td_kstack_pages = KSTACK_PAGES; 927 for (i = 0; i < KSTACK_PAGES; i++) { 928 moea64_kenter(mmup, va, pa); 929 pa += PAGE_SIZE; 930 va += PAGE_SIZE; 931 } 932 933 /* 934 * Allocate virtual address space for the message buffer. 935 */ 936 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); 937 msgbufp = (struct msgbuf *)virtual_avail; 938 va = virtual_avail; 939 virtual_avail += round_page(msgbufsize); 940 while (va < virtual_avail) { 941 moea64_kenter(mmup, va, pa); 942 pa += PAGE_SIZE; 943 va += PAGE_SIZE; 944 } 945 946 /* 947 * Allocate virtual address space for the dynamic percpu area. 948 */ 949 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 950 dpcpu = (void *)virtual_avail; 951 va = virtual_avail; 952 virtual_avail += DPCPU_SIZE; 953 while (va < virtual_avail) { 954 moea64_kenter(mmup, va, pa); 955 pa += PAGE_SIZE; 956 va += PAGE_SIZE; 957 } 958 dpcpu_init(dpcpu, 0); 959 960 /* 961 * Allocate some things for page zeroing. We put this directly 962 * in the page table, marked with LPTE_LOCKED, to avoid any 963 * of the PVO book-keeping or other parts of the VM system 964 * from even knowing that this hack exists. 
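 *
 * Two scratch slots are set up: moea64_copy_page() needs one for the
 * source page and one for the destination, while page zeroing and
 * icache syncing each use a single slot.  On machines with a direct
 * map this block is skipped entirely, since physical pages can be
 * reached through their 1:1 mapping instead.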
965 */ 966 967 if (!hw_direct_map) { 968 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 969 MTX_DEF); 970 for (i = 0; i < 2; i++) { 971 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 972 virtual_end -= PAGE_SIZE; 973 974 moea64_kenter(mmup, moea64_scratchpage_va[i], 0); 975 976 moea64_scratchpage_pvo[i] = moea64_pvo_find_va( 977 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); 978 LOCK_TABLE(); 979 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE( 980 mmup, moea64_scratchpage_pvo[i]); 981 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi 982 |= LPTE_LOCKED; 983 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i], 984 &moea64_scratchpage_pvo[i]->pvo_pte.lpte, 985 moea64_scratchpage_pvo[i]->pvo_vpn); 986 UNLOCK_TABLE(); 987 } 988 } 989} 990 991/* 992 * Activate a user pmap. The pmap must be activated before its address 993 * space can be accessed in any way. 994 */ 995void 996moea64_activate(mmu_t mmu, struct thread *td) 997{ 998 pmap_t pm; 999 1000 pm = &td->td_proc->p_vmspace->vm_pmap; 1001 sched_pin(); 1002 CPU_OR(&pm->pm_active, PCPU_PTR(cpumask)); 1003 sched_unpin(); 1004 1005 #ifdef __powerpc64__ 1006 PCPU_SET(userslb, pm->pm_slb); 1007 #else 1008 PCPU_SET(curpmap, pm->pmap_phys); 1009 #endif 1010} 1011 1012void 1013moea64_deactivate(mmu_t mmu, struct thread *td) 1014{ 1015 pmap_t pm; 1016 1017 pm = &td->td_proc->p_vmspace->vm_pmap; 1018 sched_pin(); 1019 CPU_NAND(&pm->pm_active, PCPU_PTR(cpumask)); 1020 sched_unpin(); 1021 #ifdef __powerpc64__ 1022 PCPU_SET(userslb, NULL); 1023 #else 1024 PCPU_SET(curpmap, NULL); 1025 #endif 1026} 1027 1028void 1029moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1030{ 1031 struct pvo_entry *pvo; 1032 uintptr_t pt; 1033 uint64_t vsid; 1034 int i, ptegidx; 1035 1036 PMAP_LOCK(pm); 1037 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1038 1039 if (pvo != NULL) { 1040 LOCK_TABLE(); 1041 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1042 1043 if (wired) { 1044 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1045 pm->pm_stats.wired_count++; 1046 pvo->pvo_vaddr |= PVO_WIRED; 1047 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1048 } else { 1049 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1050 pm->pm_stats.wired_count--; 1051 pvo->pvo_vaddr &= ~PVO_WIRED; 1052 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1053 } 1054 1055 if (pt != -1) { 1056 /* Update wiring flag in page table. */ 1057 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1058 pvo->pvo_vpn); 1059 } else if (wired) { 1060 /* 1061 * If we are wiring the page, and it wasn't in the 1062 * page table before, add it. 1063 */ 1064 vsid = PVO_VSID(pvo); 1065 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1066 pvo->pvo_vaddr & PVO_LARGE); 1067 1068 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 1069 1070 if (i >= 0) { 1071 PVO_PTEGIDX_CLR(pvo); 1072 PVO_PTEGIDX_SET(pvo, i); 1073 } 1074 } 1075 1076 UNLOCK_TABLE(); 1077 } 1078 PMAP_UNLOCK(pm); 1079} 1080 1081/* 1082 * This goes through and sets the physical address of our 1083 * special scratch PTE to the PA we want to zero or copy. 
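 * In effect it rewrites the low word of a pre-installed, LPTE_LOCKED
 * PTE so that the fixed scratch VA points at the new physical page
 * (with the WIMG bits recomputed for that page) and then issues
 * isync().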
Because 1084 * of locking issues (this can get called in pvo_enter() by 1085 * the UMA allocator), we can't use most other utility functions here 1086 */ 1087 1088static __inline 1089void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { 1090 1091 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1092 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1093 1094 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1095 ~(LPTE_WIMG | LPTE_RPGN); 1096 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1097 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1098 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which], 1099 &moea64_scratchpage_pvo[which]->pvo_pte.lpte, 1100 moea64_scratchpage_pvo[which]->pvo_vpn); 1101 isync(); 1102} 1103 1104void 1105moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1106{ 1107 vm_offset_t dst; 1108 vm_offset_t src; 1109 1110 dst = VM_PAGE_TO_PHYS(mdst); 1111 src = VM_PAGE_TO_PHYS(msrc); 1112 1113 if (hw_direct_map) { 1114 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1115 } else { 1116 mtx_lock(&moea64_scratchpage_mtx); 1117 1118 moea64_set_scratchpage_pa(mmu, 0, src); 1119 moea64_set_scratchpage_pa(mmu, 1, dst); 1120 1121 kcopy((void *)moea64_scratchpage_va[0], 1122 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1123 1124 mtx_unlock(&moea64_scratchpage_mtx); 1125 } 1126} 1127 1128void 1129moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1130{ 1131 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1132 1133 if (size + off > PAGE_SIZE) 1134 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1135 1136 if (hw_direct_map) { 1137 bzero((caddr_t)pa + off, size); 1138 } else { 1139 mtx_lock(&moea64_scratchpage_mtx); 1140 moea64_set_scratchpage_pa(mmu, 0, pa); 1141 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1142 mtx_unlock(&moea64_scratchpage_mtx); 1143 } 1144} 1145 1146/* 1147 * Zero a page of physical memory by temporarily mapping it 1148 */ 1149void 1150moea64_zero_page(mmu_t mmu, vm_page_t m) 1151{ 1152 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1153 vm_offset_t va, off; 1154 1155 if (!hw_direct_map) { 1156 mtx_lock(&moea64_scratchpage_mtx); 1157 1158 moea64_set_scratchpage_pa(mmu, 0, pa); 1159 va = moea64_scratchpage_va[0]; 1160 } else { 1161 va = pa; 1162 } 1163 1164 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1165 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1166 1167 if (!hw_direct_map) 1168 mtx_unlock(&moea64_scratchpage_mtx); 1169} 1170 1171void 1172moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1173{ 1174 1175 moea64_zero_page(mmu, m); 1176} 1177 1178/* 1179 * Map the given physical page at the specified virtual address in the 1180 * target pmap with the protection requested. If specified the page 1181 * will be wired down. 1182 */ 1183void 1184moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1185 vm_prot_t prot, boolean_t wired) 1186{ 1187 1188 vm_page_lock_queues(); 1189 PMAP_LOCK(pmap); 1190 moea64_enter_locked(mmu, pmap, va, m, prot, wired); 1191 vm_page_unlock_queues(); 1192 PMAP_UNLOCK(pmap); 1193} 1194 1195/* 1196 * Map the given physical page at the specified virtual address in the 1197 * target pmap with the protection requested. If specified the page 1198 * will be wired down. 1199 * 1200 * The page queues and pmap must be locked. 
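 * moea64_enter() above shows the expected order: take
 * vm_page_lock_queues() first, then PMAP_LOCK() on the target pmap,
 * and drop them in the same order after the call.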
1201 */ 1202 1203static void 1204moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1205 vm_prot_t prot, boolean_t wired) 1206{ 1207 struct pvo_head *pvo_head; 1208 uma_zone_t zone; 1209 vm_page_t pg; 1210 uint64_t pte_lo; 1211 u_int pvo_flags; 1212 int error; 1213 1214 if (!moea64_initialized) { 1215 pvo_head = &moea64_pvo_kunmanaged; 1216 pg = NULL; 1217 zone = moea64_upvo_zone; 1218 pvo_flags = 0; 1219 } else { 1220 pvo_head = vm_page_to_pvoh(m); 1221 pg = m; 1222 zone = moea64_mpvo_zone; 1223 pvo_flags = PVO_MANAGED; 1224 } 1225 1226 if (pmap_bootstrapped) 1227 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1228 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1229 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1230 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1231 ("moea64_enter_locked: page %p is not busy", m)); 1232 1233 /* XXX change the pvo head for fake pages */ 1234 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1235 pvo_flags &= ~PVO_MANAGED; 1236 pvo_head = &moea64_pvo_kunmanaged; 1237 zone = moea64_upvo_zone; 1238 } 1239 1240 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1241 1242 if (prot & VM_PROT_WRITE) { 1243 pte_lo |= LPTE_BW; 1244 if (pmap_bootstrapped && 1245 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1246 vm_page_flag_set(m, PG_WRITEABLE); 1247 } else 1248 pte_lo |= LPTE_BR; 1249 1250 if ((prot & VM_PROT_EXECUTE) == 0) 1251 pte_lo |= LPTE_NOEXEC; 1252 1253 if (wired) 1254 pvo_flags |= PVO_WIRED; 1255 1256 if ((m->flags & PG_FICTITIOUS) != 0) 1257 pvo_flags |= PVO_FAKE; 1258 1259 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, 1260 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); 1261 1262 /* 1263 * Flush the page from the instruction cache if this page is 1264 * mapped executable and cacheable. 1265 */ 1266 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) 1267 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1268} 1269 1270static void 1271moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, 1272 vm_size_t sz) 1273{ 1274 1275 /* 1276 * This is much trickier than on older systems because 1277 * we can't sync the icache on physical addresses directly 1278 * without a direct map. Instead we check a couple of cases 1279 * where the memory is already mapped in and, failing that, 1280 * use the same trick we use for page zeroing to create 1281 * a temporary mapping for this physical address. 1282 */ 1283 1284 if (!pmap_bootstrapped) { 1285 /* 1286 * If PMAP is not bootstrapped, we are likely to be 1287 * in real mode. 1288 */ 1289 __syncicache((void *)pa, sz); 1290 } else if (pmap == kernel_pmap) { 1291 __syncicache((void *)va, sz); 1292 } else if (hw_direct_map) { 1293 __syncicache((void *)pa, sz); 1294 } else { 1295 /* Use the scratch page to set up a temp mapping */ 1296 1297 mtx_lock(&moea64_scratchpage_mtx); 1298 1299 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1300 __syncicache((void *)(moea64_scratchpage_va[1] + 1301 (va & ADDR_POFF)), sz); 1302 1303 mtx_unlock(&moea64_scratchpage_mtx); 1304 } 1305} 1306 1307/* 1308 * Maps a sequence of resident pages belonging to the same object. 1309 * The sequence begins with the given page m_start. This page is 1310 * mapped at the given virtual address start. Each subsequent page is 1311 * mapped at a virtual address that is offset from start by the same 1312 * amount as the page is offset from m_start within the object. 
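 * For example, assuming m_start has object index 5, the resident page
 * with index 8 would be entered at start + 3 * PAGE_SIZE, as computed
 * by the start + ptoa(diff) expression in the loop below.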
The 1313 * last page in the sequence is the page with the largest offset from 1314 * m_start that can be mapped at a virtual address less than the given 1315 * virtual address end. Not every virtual page between start and end 1316 * is mapped; only those for which a resident page exists with the 1317 * corresponding offset from m_start are mapped. 1318 */ 1319void 1320moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1321 vm_page_t m_start, vm_prot_t prot) 1322{ 1323 vm_page_t m; 1324 vm_pindex_t diff, psize; 1325 1326 psize = atop(end - start); 1327 m = m_start; 1328 vm_page_lock_queues(); 1329 PMAP_LOCK(pm); 1330 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1331 moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot & 1332 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1333 m = TAILQ_NEXT(m, listq); 1334 } 1335 vm_page_unlock_queues(); 1336 PMAP_UNLOCK(pm); 1337} 1338 1339void 1340moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1341 vm_prot_t prot) 1342{ 1343 1344 vm_page_lock_queues(); 1345 PMAP_LOCK(pm); 1346 moea64_enter_locked(mmu, pm, va, m, 1347 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1348 vm_page_unlock_queues(); 1349 PMAP_UNLOCK(pm); 1350} 1351 1352vm_paddr_t 1353moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1354{ 1355 struct pvo_entry *pvo; 1356 vm_paddr_t pa; 1357 1358 PMAP_LOCK(pm); 1359 pvo = moea64_pvo_find_va(pm, va); 1360 if (pvo == NULL) 1361 pa = 0; 1362 else 1363 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1364 (va - PVO_VADDR(pvo)); 1365 PMAP_UNLOCK(pm); 1366 return (pa); 1367} 1368 1369/* 1370 * Atomically extract and hold the physical page with the given 1371 * pmap and virtual address pair if that mapping permits the given 1372 * protection. 1373 */ 1374vm_page_t 1375moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1376{ 1377 struct pvo_entry *pvo; 1378 vm_page_t m; 1379 vm_paddr_t pa; 1380 1381 m = NULL; 1382 pa = 0; 1383 PMAP_LOCK(pmap); 1384retry: 1385 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1386 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1387 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1388 (prot & VM_PROT_WRITE) == 0)) { 1389 if (vm_page_pa_tryrelock(pmap, 1390 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1391 goto retry; 1392 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1393 vm_page_hold(m); 1394 } 1395 PA_UNLOCK_COND(pa); 1396 PMAP_UNLOCK(pmap); 1397 return (m); 1398} 1399 1400static mmu_t installed_mmu; 1401 1402static void * 1403moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1404{ 1405 /* 1406 * This entire routine is a horrible hack to avoid bothering kmem 1407 * for new KVA addresses. Because this can get called from inside 1408 * kmem allocation routines, calling kmem for a new address here 1409 * can lead to multiply locking non-recursive mutexes. 
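 *
 * Instead, the routine takes a wired page straight from
 * vm_page_alloc() and maps it 1:1 (the returned VA equals the page's
 * physical address) via moea64_pvo_enter(), acquiring the kernel pmap
 * lock only if the caller does not already hold it.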
1410 */ 1411 static vm_pindex_t color; 1412 vm_offset_t va; 1413 1414 vm_page_t m; 1415 int pflags, needed_lock; 1416 1417 *flags = UMA_SLAB_PRIV; 1418 needed_lock = !PMAP_LOCKED(kernel_pmap); 1419 1420 if (needed_lock) 1421 PMAP_LOCK(kernel_pmap); 1422 1423 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1424 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1425 else 1426 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1427 if (wait & M_ZERO) 1428 pflags |= VM_ALLOC_ZERO; 1429 1430 for (;;) { 1431 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1432 if (m == NULL) { 1433 if (wait & M_NOWAIT) 1434 return (NULL); 1435 VM_WAIT; 1436 } else 1437 break; 1438 } 1439 1440 va = VM_PAGE_TO_PHYS(m); 1441 1442 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone, 1443 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1444 PVO_WIRED | PVO_BOOTSTRAP); 1445 1446 if (needed_lock) 1447 PMAP_UNLOCK(kernel_pmap); 1448 1449 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1450 bzero((void *)va, PAGE_SIZE); 1451 1452 return (void *)va; 1453} 1454 1455void 1456moea64_init(mmu_t mmu) 1457{ 1458 1459 CTR0(KTR_PMAP, "moea64_init"); 1460 1461 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1462 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1463 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1464 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1465 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1466 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1467 1468 if (!hw_direct_map) { 1469 installed_mmu = mmu; 1470 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1471 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1472 } 1473 1474 moea64_initialized = TRUE; 1475} 1476 1477boolean_t 1478moea64_is_referenced(mmu_t mmu, vm_page_t m) 1479{ 1480 1481 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1482 ("moea64_is_referenced: page %p is not managed", m)); 1483 return (moea64_query_bit(mmu, m, PTE_REF)); 1484} 1485 1486boolean_t 1487moea64_is_modified(mmu_t mmu, vm_page_t m) 1488{ 1489 1490 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1491 ("moea64_is_modified: page %p is not managed", m)); 1492 1493 /* 1494 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1495 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1496 * is clear, no PTEs can have LPTE_CHG set. 
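 * That is what lets the test below return FALSE without scanning any
 * of the page's PTEs when the page is neither busy nor mapped
 * writeable; only otherwise is moea64_query_bit() consulted.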
1497 */ 1498 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1499 if ((m->oflags & VPO_BUSY) == 0 && 1500 (m->flags & PG_WRITEABLE) == 0) 1501 return (FALSE); 1502 return (moea64_query_bit(mmu, m, LPTE_CHG)); 1503} 1504 1505boolean_t 1506moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1507{ 1508 struct pvo_entry *pvo; 1509 boolean_t rv; 1510 1511 PMAP_LOCK(pmap); 1512 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1513 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0; 1514 PMAP_UNLOCK(pmap); 1515 return (rv); 1516} 1517 1518void 1519moea64_clear_reference(mmu_t mmu, vm_page_t m) 1520{ 1521 1522 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1523 ("moea64_clear_reference: page %p is not managed", m)); 1524 moea64_clear_bit(mmu, m, LPTE_REF); 1525} 1526 1527void 1528moea64_clear_modify(mmu_t mmu, vm_page_t m) 1529{ 1530 1531 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1532 ("moea64_clear_modify: page %p is not managed", m)); 1533 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1534 KASSERT((m->oflags & VPO_BUSY) == 0, 1535 ("moea64_clear_modify: page %p is busy", m)); 1536 1537 /* 1538 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG 1539 * set. If the object containing the page is locked and the page is 1540 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1541 */ 1542 if ((m->flags & PG_WRITEABLE) == 0) 1543 return; 1544 moea64_clear_bit(mmu, m, LPTE_CHG); 1545} 1546 1547/* 1548 * Clear the write and modified bits in each of the given page's mappings. 1549 */ 1550void 1551moea64_remove_write(mmu_t mmu, vm_page_t m) 1552{ 1553 struct pvo_entry *pvo; 1554 uintptr_t pt; 1555 pmap_t pmap; 1556 uint64_t lo; 1557 1558 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1559 ("moea64_remove_write: page %p is not managed", m)); 1560 1561 /* 1562 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1563 * another thread while the object is locked. Thus, if PG_WRITEABLE 1564 * is clear, no page table entries need updating. 1565 */ 1566 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1567 if ((m->oflags & VPO_BUSY) == 0 && 1568 (m->flags & PG_WRITEABLE) == 0) 1569 return; 1570 vm_page_lock_queues(); 1571 lo = moea64_attr_fetch(m); 1572 powerpc_sync(); 1573 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1574 pmap = pvo->pvo_pmap; 1575 PMAP_LOCK(pmap); 1576 LOCK_TABLE(); 1577 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1578 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1579 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1580 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1581 if (pt != -1) { 1582 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 1583 lo |= pvo->pvo_pte.lpte.pte_lo; 1584 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1585 MOEA64_PTE_CHANGE(mmu, pt, 1586 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 1587 if (pvo->pvo_pmap == kernel_pmap) 1588 isync(); 1589 } 1590 } 1591 UNLOCK_TABLE(); 1592 PMAP_UNLOCK(pmap); 1593 } 1594 if ((lo & LPTE_CHG) != 0) { 1595 moea64_attr_clear(m, LPTE_CHG); 1596 vm_page_dirty(m); 1597 } 1598 vm_page_flag_clear(m, PG_WRITEABLE); 1599 vm_page_unlock_queues(); 1600} 1601 1602/* 1603 * moea64_ts_referenced: 1604 * 1605 * Return a count of reference bits for a page, clearing those bits. 1606 * It is not necessary for every reference bit to be cleared, but it 1607 * is necessary that 0 only be returned when there are truly no 1608 * reference bits set. 
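 * Here the count comes from moea64_clear_bit(..., LPTE_REF), which
 * clears LPTE_REF in the page's mappings and reports how many had it
 * set.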
1609 * 1610 * XXX: The exact number of bits to check and clear is a matter that 1611 * should be tested and standardized at some point in the future for 1612 * optimal aging of shared pages. 1613 */ 1614boolean_t 1615moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1616{ 1617 1618 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1619 ("moea64_ts_referenced: page %p is not managed", m)); 1620 return (moea64_clear_bit(mmu, m, LPTE_REF)); 1621} 1622 1623/* 1624 * Modify the WIMG settings of all mappings for a page. 1625 */ 1626void 1627moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1628{ 1629 struct pvo_entry *pvo; 1630 struct pvo_head *pvo_head; 1631 uintptr_t pt; 1632 pmap_t pmap; 1633 uint64_t lo; 1634 1635 if (m->flags & PG_FICTITIOUS) { 1636 m->md.mdpg_cache_attrs = ma; 1637 return; 1638 } 1639 1640 vm_page_lock_queues(); 1641 pvo_head = vm_page_to_pvoh(m); 1642 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1643 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1644 pmap = pvo->pvo_pmap; 1645 PMAP_LOCK(pmap); 1646 LOCK_TABLE(); 1647 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1648 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG; 1649 pvo->pvo_pte.lpte.pte_lo |= lo; 1650 if (pt != -1) { 1651 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1652 pvo->pvo_vpn); 1653 if (pvo->pvo_pmap == kernel_pmap) 1654 isync(); 1655 } 1656 UNLOCK_TABLE(); 1657 PMAP_UNLOCK(pmap); 1658 } 1659 m->md.mdpg_cache_attrs = ma; 1660 vm_page_unlock_queues(); 1661} 1662 1663/* 1664 * Map a wired page into kernel virtual address space. 1665 */ 1666void 1667moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1668{ 1669 uint64_t pte_lo; 1670 int error; 1671 1672 pte_lo = moea64_calc_wimg(pa, ma); 1673 1674 PMAP_LOCK(kernel_pmap); 1675 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone, 1676 &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1677 1678 if (error != 0 && error != ENOENT) 1679 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1680 pa, error); 1681 1682 /* 1683 * Flush the memory from the instruction cache. 1684 */ 1685 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) 1686 __syncicache((void *)va, PAGE_SIZE); 1687 PMAP_UNLOCK(kernel_pmap); 1688} 1689 1690void 1691moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1692{ 1693 1694 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1695} 1696 1697/* 1698 * Extract the physical page address associated with the given kernel virtual 1699 * address. 1700 */ 1701vm_offset_t 1702moea64_kextract(mmu_t mmu, vm_offset_t va) 1703{ 1704 struct pvo_entry *pvo; 1705 vm_paddr_t pa; 1706 1707 /* 1708 * Shortcut the direct-mapped case when applicable. We never put 1709 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1710 */ 1711 if (va < VM_MIN_KERNEL_ADDRESS) 1712 return (va); 1713 1714 PMAP_LOCK(kernel_pmap); 1715 pvo = moea64_pvo_find_va(kernel_pmap, va); 1716 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1717 va)); 1718 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo)); 1719 PMAP_UNLOCK(kernel_pmap); 1720 return (pa); 1721} 1722 1723/* 1724 * Remove a wired page from kernel virtual address space. 1725 */ 1726void 1727moea64_kremove(mmu_t mmu, vm_offset_t va) 1728{ 1729 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1730} 1731 1732/* 1733 * Map a range of physical addresses into kernel virtual address space. 1734 * 1735 * The value passed in *virt is a suggested virtual address for the mapping. 
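 * The range [pa_start, pa_end) is simply entered one page at a time
 * with moea64_kenter(), beginning at that address.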
1736 * Architectures which can support a direct-mapped physical to virtual region 1737 * can return the appropriate address within that region, leaving '*virt' 1738 * unchanged. We cannot and therefore do not; *virt is updated with the 1739 * first usable address after the mapped region. 1740 */ 1741vm_offset_t 1742moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1743 vm_offset_t pa_end, int prot) 1744{ 1745 vm_offset_t sva, va; 1746 1747 sva = *virt; 1748 va = sva; 1749 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1750 moea64_kenter(mmu, va, pa_start); 1751 *virt = va; 1752 1753 return (sva); 1754} 1755 1756/* 1757 * Returns true if the pmap's pv is one of the first 1758 * 16 pvs linked to from this page. This count may 1759 * be changed upwards or downwards in the future; it 1760 * is only necessary that true be returned for a small 1761 * subset of pmaps for proper page aging. 1762 */ 1763boolean_t 1764moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1765{ 1766 int loops; 1767 struct pvo_entry *pvo; 1768 boolean_t rv; 1769 1770 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1771 ("moea64_page_exists_quick: page %p is not managed", m)); 1772 loops = 0; 1773 rv = FALSE; 1774 vm_page_lock_queues(); 1775 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1776 if (pvo->pvo_pmap == pmap) { 1777 rv = TRUE; 1778 break; 1779 } 1780 if (++loops >= 16) 1781 break; 1782 } 1783 vm_page_unlock_queues(); 1784 return (rv); 1785} 1786 1787/* 1788 * Return the number of managed mappings to the given physical page 1789 * that are wired. 1790 */ 1791int 1792moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1793{ 1794 struct pvo_entry *pvo; 1795 int count; 1796 1797 count = 0; 1798 if ((m->flags & PG_FICTITIOUS) != 0) 1799 return (count); 1800 vm_page_lock_queues(); 1801 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1802 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1803 count++; 1804 vm_page_unlock_queues(); 1805 return (count); 1806} 1807 1808static uintptr_t moea64_vsidcontext; 1809 1810uintptr_t 1811moea64_get_unique_vsid(void) { 1812 u_int entropy; 1813 register_t hash; 1814 uint32_t mask; 1815 int i; 1816 1817 entropy = 0; 1818 __asm __volatile("mftb %0" : "=r"(entropy)); 1819 1820 mtx_lock(&moea64_slb_mutex); 1821 for (i = 0; i < NVSIDS; i += VSID_NBPW) { 1822 u_int n; 1823 1824 /* 1825 * Create a new value by mutiplying by a prime and adding in 1826 * entropy from the timebase register. This is to make the 1827 * VSID more random so that the PT hash function collides 1828 * less often. (Note that the prime casues gcc to do shifts 1829 * instead of a multiply.) 1830 */ 1831 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1832 hash = moea64_vsidcontext & (NVSIDS - 1); 1833 if (hash == 0) /* 0 is special, avoid it */ 1834 continue; 1835 n = hash >> 5; 1836 mask = 1 << (hash & (VSID_NBPW - 1)); 1837 hash = (moea64_vsidcontext & VSID_HASHMASK); 1838 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1839 /* anything free in this bucket? 
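			 * Each bitmap word covers VSID_NBPW (32) hash
			 * values, so a word of all ones means every slot
			 * in this bucket is taken and we retry with fresh
			 * entropy; otherwise ffs() below picks the first
			 * free slot.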
*/ 1840 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1841 entropy = (moea64_vsidcontext >> 20); 1842 continue; 1843 } 1844 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1845 mask = 1 << i; 1846 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 1847 hash |= i; 1848 } 1849 KASSERT(!(moea64_vsid_bitmap[n] & mask), 1850 ("Allocating in-use VSID %#zx\n", hash)); 1851 moea64_vsid_bitmap[n] |= mask; 1852 mtx_unlock(&moea64_slb_mutex); 1853 return (hash); 1854 } 1855 1856 mtx_unlock(&moea64_slb_mutex); 1857 panic("%s: out of segments",__func__); 1858} 1859 1860#ifdef __powerpc64__ 1861void 1862moea64_pinit(mmu_t mmu, pmap_t pmap) 1863{ 1864 PMAP_LOCK_INIT(pmap); 1865 1866 pmap->pm_slb_tree_root = slb_alloc_tree(); 1867 pmap->pm_slb = slb_alloc_user_cache(); 1868 pmap->pm_slb_len = 0; 1869} 1870#else 1871void 1872moea64_pinit(mmu_t mmu, pmap_t pmap) 1873{ 1874 int i; 1875 uint32_t hash; 1876 1877 PMAP_LOCK_INIT(pmap); 1878 1879 if (pmap_bootstrapped) 1880 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 1881 (vm_offset_t)pmap); 1882 else 1883 pmap->pmap_phys = pmap; 1884 1885 /* 1886 * Allocate some segment registers for this pmap. 1887 */ 1888 hash = moea64_get_unique_vsid(); 1889 1890 for (i = 0; i < 16; i++) 1891 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1892 1893 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1894} 1895#endif 1896 1897/* 1898 * Initialize the pmap associated with process 0. 1899 */ 1900void 1901moea64_pinit0(mmu_t mmu, pmap_t pm) 1902{ 1903 moea64_pinit(mmu, pm); 1904 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1905} 1906 1907/* 1908 * Set the physical protection on the specified range of this map as requested. 1909 */ 1910void 1911moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1912 vm_prot_t prot) 1913{ 1914 struct pvo_entry *pvo; 1915 uintptr_t pt; 1916 1917 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1918 eva, prot); 1919 1920 1921 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1922 ("moea64_protect: non current pmap")); 1923 1924 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1925 moea64_remove(mmu, pm, sva, eva); 1926 return; 1927 } 1928 1929 vm_page_lock_queues(); 1930 PMAP_LOCK(pm); 1931 for (; sva < eva; sva += PAGE_SIZE) { 1932 pvo = moea64_pvo_find_va(pm, sva); 1933 if (pvo == NULL) 1934 continue; 1935 1936 /* 1937 * Grab the PTE pointer before we diddle with the cached PTE 1938 * copy. 1939 */ 1940 LOCK_TABLE(); 1941 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1942 1943 /* 1944 * Change the protection of the page. 1945 */ 1946 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1947 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1948 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1949 if ((prot & VM_PROT_EXECUTE) == 0) 1950 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1951 1952 /* 1953 * If the PVO is in the page table, update that pte as well. 1954 */ 1955 if (pt != -1) { 1956 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1957 pvo->pvo_vpn); 1958 if ((pvo->pvo_pte.lpte.pte_lo & 1959 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1960 moea64_syncicache(mmu, pm, sva, 1961 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1962 PAGE_SIZE); 1963 } 1964 } 1965 UNLOCK_TABLE(); 1966 } 1967 vm_page_unlock_queues(); 1968 PMAP_UNLOCK(pm); 1969} 1970 1971/* 1972 * Map a list of wired pages into kernel virtual address space. This is 1973 * intended for temporary mappings which do not need page modification or 1974 * references recorded. Existing mappings in the region are overwritten. 
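 *
 * The pages are entered with moea64_kenter() at consecutive virtual
 * addresses starting at 'va'; a matching moea64_qremove() over the
 * same range undoes the mappings again.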
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
	#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
	#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
	#endif

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(pm, sva);
		if (pvo != NULL)
			moea64_pvo_remove(mmu, pvo);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo, *next_pvo;
	pmap_t pmap;

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
		moea64_attr_clear(m, LPTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
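 *
 * The returned region is carved out of phys_avail[] in place: when it abuts
 * the start or end of an entry that entry is simply trimmed; otherwise the
 * entry is split in two and phys_avail_count is incremented.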
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	uint64_t vsid;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table.  The mapping we are working with is already
		 * protected by the PMAP lock.
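		 * The allocation uses M_NOWAIT and may therefore fail; the
		 * NULL check after the table lock is retaken returns ENOMEM
		 * to the caller in that case.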
2209 */ 2210 UNLOCK_TABLE(); 2211 pvo = uma_zalloc(zone, M_NOWAIT); 2212 LOCK_TABLE(); 2213 } 2214 2215 if (pvo == NULL) { 2216 UNLOCK_TABLE(); 2217 return (ENOMEM); 2218 } 2219 2220 moea64_pvo_entries++; 2221 pvo->pvo_vaddr = va; 2222 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2223 | (vsid << 16); 2224 pvo->pvo_pmap = pm; 2225 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2226 pvo->pvo_vaddr &= ~ADDR_POFF; 2227 2228 if (flags & PVO_WIRED) 2229 pvo->pvo_vaddr |= PVO_WIRED; 2230 if (pvo_head != &moea64_pvo_kunmanaged) 2231 pvo->pvo_vaddr |= PVO_MANAGED; 2232 if (bootstrap) 2233 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2234 if (flags & PVO_FAKE) 2235 pvo->pvo_vaddr |= PVO_FAKE; 2236 if (flags & PVO_LARGE) 2237 pvo->pvo_vaddr |= PVO_LARGE; 2238 2239 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2240 (uint64_t)(pa) | pte_lo, flags); 2241 2242 /* 2243 * Remember if the list was empty and therefore will be the first 2244 * item. 2245 */ 2246 if (LIST_FIRST(pvo_head) == NULL) 2247 first = 1; 2248 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2249 2250 if (pvo->pvo_vaddr & PVO_WIRED) { 2251 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2252 pm->pm_stats.wired_count++; 2253 } 2254 pm->pm_stats.resident_count++; 2255 2256 /* 2257 * We hope this succeeds but it isn't required. 2258 */ 2259 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 2260 if (i >= 0) { 2261 PVO_PTEGIDX_SET(pvo, i); 2262 } else { 2263 panic("moea64_pvo_enter: overflow"); 2264 moea64_pte_overflow++; 2265 } 2266 2267 if (pm == kernel_pmap) 2268 isync(); 2269 2270 UNLOCK_TABLE(); 2271 2272#ifdef __powerpc64__ 2273 /* 2274 * Make sure all our bootstrap mappings are in the SLB as soon 2275 * as virtual memory is switched on. 2276 */ 2277 if (!pmap_bootstrapped) 2278 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2279#endif 2280 2281 return (first ? ENOENT : 0); 2282} 2283 2284static void 2285moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo) 2286{ 2287 uintptr_t pt; 2288 2289 /* 2290 * If there is an active pte entry, we need to deactivate it (and 2291 * save the ref & cfg bits). 2292 */ 2293 LOCK_TABLE(); 2294 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2295 if (pt != -1) { 2296 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2297 PVO_PTEGIDX_CLR(pvo); 2298 } else { 2299 moea64_pte_overflow--; 2300 } 2301 2302 /* 2303 * Update our statistics. 2304 */ 2305 pvo->pvo_pmap->pm_stats.resident_count--; 2306 if (pvo->pvo_vaddr & PVO_WIRED) 2307 pvo->pvo_pmap->pm_stats.wired_count--; 2308 2309 /* 2310 * Save the REF/CHG bits into their cache if the page is managed. 2311 */ 2312 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2313 struct vm_page *pg; 2314 2315 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2316 if (pg != NULL) { 2317 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo & 2318 (LPTE_REF | LPTE_CHG)); 2319 } 2320 } 2321 2322 /* 2323 * Remove this PVO from the PV list. 2324 */ 2325 LIST_REMOVE(pvo, pvo_vlink); 2326 2327 /* 2328 * Remove this from the overflow list and return it to the pool 2329 * if we aren't going to reuse it. 2330 */ 2331 LIST_REMOVE(pvo, pvo_olink); 2332 2333 moea64_pvo_entries--; 2334 moea64_pvo_remove_calls++; 2335 2336 UNLOCK_TABLE(); 2337 2338 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2339 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? 
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry *pvo;
	int ptegidx;
	uint64_t vsid;
	#ifdef __powerpc64__
	uint64_t slbv;

	if (pm == kernel_pmap) {
		slbv = kernel_va_to_slbv(va);
	} else {
		struct slb *slb;
		slb = user_va_to_slb_entry(pm, va);
		/* The page is not mapped if the segment isn't */
		if (slb == NULL)
			return NULL;
		slbv = slb->slbv;
	}

	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	if (slbv & SLBV_L)
		va &= ~moea64_large_page_mask;
	else
		va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
	#else
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, 0);
	#endif

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
			break;
	}
	UNLOCK_TABLE();

	return (pvo);
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct pvo_entry *pvo;
	uintptr_t pt;

	if (moea64_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			moea64_attr_save(m, ptebit);
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  if so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (FALSE);
}

static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	uintptr_t pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
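	 * The value returned to the caller is the number of mappings whose
	 * PTE actually had 'ptebit' set when it was cleared.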
2467 */ 2468 count = 0; 2469 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2470 2471 LOCK_TABLE(); 2472 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2473 if (pt != -1) { 2474 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2475 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2476 count++; 2477 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte, 2478 pvo->pvo_vpn, ptebit); 2479 } 2480 } 2481 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2482 UNLOCK_TABLE(); 2483 } 2484 2485 vm_page_unlock_queues(); 2486 return (count); 2487} 2488 2489boolean_t 2490moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2491{ 2492 struct pvo_entry *pvo; 2493 vm_offset_t ppa; 2494 int error = 0; 2495 2496 PMAP_LOCK(kernel_pmap); 2497 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2498 pvo = moea64_pvo_find_va(kernel_pmap, ppa); 2499 if (pvo == NULL || 2500 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2501 error = EFAULT; 2502 break; 2503 } 2504 } 2505 PMAP_UNLOCK(kernel_pmap); 2506 2507 return (error); 2508} 2509 2510/* 2511 * Map a set of physical memory pages into the kernel virtual 2512 * address space. Return a pointer to where it is mapped. This 2513 * routine is intended to be used for mapping device memory, 2514 * NOT real memory. 2515 */ 2516void * 2517moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2518{ 2519 vm_offset_t va, tmpva, ppa, offset; 2520 2521 ppa = trunc_page(pa); 2522 offset = pa & PAGE_MASK; 2523 size = roundup(offset + size, PAGE_SIZE); 2524 2525 va = kmem_alloc_nofault(kernel_map, size); 2526 2527 if (!va) 2528 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2529 2530 for (tmpva = va; size > 0;) { 2531 moea64_kenter_attr(mmu, tmpva, ppa, ma); 2532 size -= PAGE_SIZE; 2533 tmpva += PAGE_SIZE; 2534 ppa += PAGE_SIZE; 2535 } 2536 2537 return ((void *)(va + offset)); 2538} 2539 2540void * 2541moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2542{ 2543 2544 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); 2545} 2546 2547void 2548moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2549{ 2550 vm_offset_t base, offset; 2551 2552 base = trunc_page(va); 2553 offset = va & PAGE_MASK; 2554 size = roundup(offset + size, PAGE_SIZE); 2555 2556 kmem_free(kernel_map, base, size); 2557} 2558 2559void 2560moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2561{ 2562 struct pvo_entry *pvo; 2563 vm_offset_t lim; 2564 vm_paddr_t pa; 2565 vm_size_t len; 2566 2567 PMAP_LOCK(pm); 2568 while (sz > 0) { 2569 lim = round_page(va); 2570 len = MIN(lim - va, sz); 2571 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 2572 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) { 2573 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 2574 (va & ADDR_POFF); 2575 moea64_syncicache(mmu, pm, va, pa, len); 2576 } 2577 va += len; 2578 sz -= len; 2579 } 2580 PMAP_UNLOCK(pm); 2581} 2582