mmu_oea64.c revision 208574
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 208574 2010-05-26 18:00:44Z alc $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_kstack_pages.h" 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <sys/kdb.h> 131 132#include <dev/ofw/openfirm.h> 133 134#include <vm/vm.h> 135#include <vm/vm_param.h> 136#include <vm/vm_kern.h> 137#include <vm/vm_page.h> 138#include <vm/vm_map.h> 139#include <vm/vm_object.h> 140#include <vm/vm_extern.h> 141#include <vm/vm_pageout.h> 142#include <vm/vm_pager.h> 143#include <vm/uma.h> 144 145#include <machine/cpu.h> 146#include <machine/platform.h> 147#include <machine/frame.h> 148#include <machine/md_var.h> 149#include <machine/psl.h> 150#include <machine/bat.h> 151#include <machine/pte.h> 152#include <machine/sr.h> 153#include <machine/trap.h> 154#include <machine/mmuvar.h> 155 156#include "mmu_if.h" 157 158#define MOEA_DEBUG 159 160#define TODO panic("%s: not implemented", __func__); 161 162static __inline u_int32_t 163cntlzw(volatile u_int32_t a) { 164 u_int32_t b; 165 __asm ("cntlzw %0, %1" : "=r"(b) : "r"(a)); 166 return b; 167} 168 169static __inline uint64_t 170va_to_vsid(pmap_t pm, vm_offset_t va) 171{ 172 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); 173} 174 175#define PTESYNC() __asm __volatile("ptesync"); 176#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 177#define SYNC() __asm __volatile("sync"); 178#define EIEIO() __asm __volatile("eieio"); 179 180/* 181 * The tlbie instruction must be executed in 64-bit mode 182 * so we have to twiddle MSR[SF] around every invocation. 183 * Just to add to the fun, exceptions must be off as well 184 * so that we can't trap in 64-bit mode. What a pain. 
185 */ 186struct mtx tlbie_mutex; 187 188static __inline void 189TLBIE(pmap_t pmap, vm_offset_t va) { 190 uint64_t vpn; 191 register_t vpn_hi, vpn_lo; 192 register_t msr; 193 register_t scratch; 194 195 vpn = (uint64_t)(va & ADDR_PIDX); 196 if (pmap != NULL) 197 vpn |= (va_to_vsid(pmap,va) << 28); 198 vpn &= ~(0xffffULL << 48); 199 200 vpn_hi = (uint32_t)(vpn >> 32); 201 vpn_lo = (uint32_t)vpn; 202 203 mtx_lock_spin(&tlbie_mutex); 204 __asm __volatile("\ 205 mfmsr %0; \ 206 mr %1, %0; \ 207 insrdi %1,%5,1,0; \ 208 mtmsrd %1; \ 209 ptesync; \ 210 \ 211 sld %1,%2,%4; \ 212 or %1,%1,%3; \ 213 tlbie %1; \ 214 \ 215 mtmsrd %0; \ 216 eieio; \ 217 tlbsync; \ 218 ptesync;" 219 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 220 : "memory"); 221 mtx_unlock_spin(&tlbie_mutex); 222} 223 224#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 225#define ENABLE_TRANS(msr) mtmsr(msr); isync() 226 227#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 228#define VSID_TO_SR(vsid) ((vsid) & 0xf) 229#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 230#define VSID_HASH_MASK 0x0000007fffffffffULL 231 232#define PVO_PTEGIDX_MASK 0x007UL /* which PTEG slot */ 233#define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */ 234#define PVO_WIRED 0x010UL /* PVO entry is wired */ 235#define PVO_MANAGED 0x020UL /* PVO entry is managed */ 236#define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during 237 bootstrap */ 238#define PVO_FAKE 0x100UL /* fictitious phys page */ 239#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 240#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 241#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 242#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 243#define PVO_PTEGIDX_CLR(pvo) \ 244 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 245#define PVO_PTEGIDX_SET(pvo, i) \ 246 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 247 248#define MOEA_PVO_CHECK(pvo) 249 250#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 251#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 252#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 253 254struct ofw_map { 255 vm_offset_t om_va; 256 vm_size_t om_len; 257 vm_offset_t om_pa_hi; 258 vm_offset_t om_pa_lo; 259 u_int om_mode; 260}; 261 262/* 263 * Map of physical memory regions. 264 */ 265static struct mem_region *regions; 266static struct mem_region *pregions; 267extern u_int phys_avail_count; 268extern int regions_sz, pregions_sz; 269extern int ofw_real_mode; 270 271extern struct pmap ofw_pmap; 272 273extern void bs_remap_earlyboot(void); 274 275 276/* 277 * Lock for the pteg and pvo tables. 278 */ 279struct mtx moea64_table_mutex; 280 281/* 282 * PTEG data. 283 */ 284static struct lpteg *moea64_pteg_table; 285u_int moea64_pteg_count; 286u_int moea64_pteg_mask; 287 288/* 289 * PVO data. 
290 */ 291struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 292/* lists of unmanaged pages */ 293struct pvo_head moea64_pvo_kunmanaged = 294 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 295struct pvo_head moea64_pvo_unmanaged = 296 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 297 298uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 299uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 300 301#define BPVO_POOL_SIZE 327680 302static struct pvo_entry *moea64_bpvo_pool; 303static int moea64_bpvo_pool_index = 0; 304 305#define VSID_NBPW (sizeof(u_int32_t) * 8) 306static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW]; 307 308static boolean_t moea64_initialized = FALSE; 309 310/* 311 * Statistics. 312 */ 313u_int moea64_pte_valid = 0; 314u_int moea64_pte_overflow = 0; 315u_int moea64_pvo_entries = 0; 316u_int moea64_pvo_enter_calls = 0; 317u_int moea64_pvo_remove_calls = 0; 318SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 319 &moea64_pte_valid, 0, ""); 320SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 321 &moea64_pte_overflow, 0, ""); 322SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 323 &moea64_pvo_entries, 0, ""); 324SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 325 &moea64_pvo_enter_calls, 0, ""); 326SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 327 &moea64_pvo_remove_calls, 0, ""); 328 329vm_offset_t moea64_scratchpage_va[2]; 330struct lpte *moea64_scratchpage_pte[2]; 331struct mtx moea64_scratchpage_mtx; 332 333/* 334 * Allocate physical memory for use in moea64_bootstrap. 335 */ 336static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 337 338/* 339 * PTE calls. 340 */ 341static int moea64_pte_insert(u_int, struct lpte *); 342 343/* 344 * PVO calls. 345 */ 346static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 347 vm_offset_t, vm_offset_t, uint64_t, int); 348static void moea64_pvo_remove(struct pvo_entry *, int); 349static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *); 350static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int); 351 352/* 353 * Utility routines. 
354 */ 355static void moea64_bridge_bootstrap(mmu_t mmup, 356 vm_offset_t kernelstart, vm_offset_t kernelend); 357static void moea64_bridge_cpu_bootstrap(mmu_t, int ap); 358static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 359 vm_prot_t, boolean_t); 360static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 361static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *); 362static void moea64_kremove(mmu_t, vm_offset_t); 363static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 364 vm_offset_t pa, vm_size_t sz); 365static void tlbia(void); 366 367/* 368 * Kernel MMU interface 369 */ 370void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 371void moea64_clear_modify(mmu_t, vm_page_t); 372void moea64_clear_reference(mmu_t, vm_page_t); 373void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 374void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 375void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 376 vm_prot_t); 377void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 378vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 379vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 380void moea64_init(mmu_t); 381boolean_t moea64_is_modified(mmu_t, vm_page_t); 382boolean_t moea64_is_referenced(mmu_t, vm_page_t); 383boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 384vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 385boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 386int moea64_page_wired_mappings(mmu_t, vm_page_t); 387void moea64_pinit(mmu_t, pmap_t); 388void moea64_pinit0(mmu_t, pmap_t); 389void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 390void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 391void moea64_qremove(mmu_t, vm_offset_t, int); 392void moea64_release(mmu_t, pmap_t); 393void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 394void moea64_remove_all(mmu_t, vm_page_t); 395void moea64_remove_write(mmu_t, vm_page_t); 396void moea64_zero_page(mmu_t, vm_page_t); 397void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 398void moea64_zero_page_idle(mmu_t, vm_page_t); 399void moea64_activate(mmu_t, struct thread *); 400void moea64_deactivate(mmu_t, struct thread *); 401void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 402void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 403vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 404void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 405boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 406static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 407 408static mmu_method_t moea64_bridge_methods[] = { 409 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 410 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 411 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 412 MMUMETHOD(mmu_copy_page, moea64_copy_page), 413 MMUMETHOD(mmu_enter, moea64_enter), 414 MMUMETHOD(mmu_enter_object, moea64_enter_object), 415 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 416 MMUMETHOD(mmu_extract, moea64_extract), 417 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 418 MMUMETHOD(mmu_init, moea64_init), 419 MMUMETHOD(mmu_is_modified, moea64_is_modified), 420 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 421 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 422 MMUMETHOD(mmu_map, moea64_map), 423 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 424 
MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 425 MMUMETHOD(mmu_pinit, moea64_pinit), 426 MMUMETHOD(mmu_pinit0, moea64_pinit0), 427 MMUMETHOD(mmu_protect, moea64_protect), 428 MMUMETHOD(mmu_qenter, moea64_qenter), 429 MMUMETHOD(mmu_qremove, moea64_qremove), 430 MMUMETHOD(mmu_release, moea64_release), 431 MMUMETHOD(mmu_remove, moea64_remove), 432 MMUMETHOD(mmu_remove_all, moea64_remove_all), 433 MMUMETHOD(mmu_remove_write, moea64_remove_write), 434 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 435 MMUMETHOD(mmu_zero_page, moea64_zero_page), 436 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 437 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 438 MMUMETHOD(mmu_activate, moea64_activate), 439 MMUMETHOD(mmu_deactivate, moea64_deactivate), 440 441 /* Internal interfaces */ 442 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap), 443 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap), 444 MMUMETHOD(mmu_mapdev, moea64_mapdev), 445 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 446 MMUMETHOD(mmu_kextract, moea64_kextract), 447 MMUMETHOD(mmu_kenter, moea64_kenter), 448 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 449 450 { 0, 0 } 451}; 452 453static mmu_def_t oea64_bridge_mmu = { 454 MMU_TYPE_G5, 455 moea64_bridge_methods, 456 0 457}; 458MMU_DEF(oea64_bridge_mmu); 459 460static __inline u_int 461va_to_pteg(uint64_t vsid, vm_offset_t addr) 462{ 463 uint64_t hash; 464 465 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 466 ADDR_PIDX_SHFT); 467 return (hash & moea64_pteg_mask); 468} 469 470static __inline struct pvo_head * 471pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 472{ 473 struct vm_page *pg; 474 475 pg = PHYS_TO_VM_PAGE(pa); 476 477 if (pg_p != NULL) 478 *pg_p = pg; 479 480 if (pg == NULL) 481 return (&moea64_pvo_unmanaged); 482 483 return (&pg->md.mdpg_pvoh); 484} 485 486static __inline struct pvo_head * 487vm_page_to_pvoh(vm_page_t m) 488{ 489 490 return (&m->md.mdpg_pvoh); 491} 492 493static __inline void 494moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 495{ 496 497 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 498 m->md.mdpg_attrs &= ~ptebit; 499} 500 501static __inline u_int64_t 502moea64_attr_fetch(vm_page_t m) 503{ 504 505 return (m->md.mdpg_attrs); 506} 507 508static __inline void 509moea64_attr_save(vm_page_t m, u_int64_t ptebit) 510{ 511 512 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 513 m->md.mdpg_attrs |= ptebit; 514} 515 516static __inline void 517moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 518 uint64_t pte_lo) 519{ 520 ASSERT_TABLE_LOCK(); 521 522 /* 523 * Construct a PTE. Default to IMB initially. Valid bit only gets 524 * set when the real pte is set in memory. 525 * 526 * Note: Don't set the valid bit for correct operation of tlb update. 
527 */ 528 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 529 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 530 531 pt->pte_lo = pte_lo; 532} 533 534static __inline void 535moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 536{ 537 538 ASSERT_TABLE_LOCK(); 539 540 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 541} 542 543static __inline void 544moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit) 545{ 546 ASSERT_TABLE_LOCK(); 547 548 /* 549 * As shown in Section 7.6.3.2.3 550 */ 551 pt->pte_lo &= ~ptebit; 552 TLBIE(pmap,va); 553} 554 555static __inline void 556moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 557{ 558 559 ASSERT_TABLE_LOCK(); 560 pvo_pt->pte_hi |= LPTE_VALID; 561 562 /* 563 * Update the PTE as defined in section 7.6.3.1. 564 * Note that the REF/CHG bits are from pvo_pt and thus should have 565 * been saved so this routine can restore them (if desired). 566 */ 567 pt->pte_lo = pvo_pt->pte_lo; 568 EIEIO(); 569 pt->pte_hi = pvo_pt->pte_hi; 570 PTESYNC(); 571 moea64_pte_valid++; 572} 573 574static __inline void 575moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 576{ 577 ASSERT_TABLE_LOCK(); 578 pvo_pt->pte_hi &= ~LPTE_VALID; 579 580 /* 581 * Force the reg & chg bits back into the PTEs. 582 */ 583 SYNC(); 584 585 /* 586 * Invalidate the pte. 587 */ 588 pt->pte_hi &= ~LPTE_VALID; 589 TLBIE(pmap,va); 590 591 /* 592 * Save the reg & chg bits. 593 */ 594 moea64_pte_synch(pt, pvo_pt); 595 moea64_pte_valid--; 596} 597 598static __inline void 599moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 600{ 601 602 /* 603 * Invalidate the PTE 604 */ 605 moea64_pte_unset(pt, pvo_pt, pmap, va); 606 moea64_pte_set(pt, pvo_pt); 607 if (pmap == kernel_pmap) 608 isync(); 609} 610 611static __inline uint64_t 612moea64_calc_wimg(vm_offset_t pa) 613{ 614 uint64_t pte_lo; 615 int i; 616 617 /* 618 * Assume the page is cache inhibited and access is guarded unless 619 * it's in our available memory array. 620 */ 621 pte_lo = LPTE_I | LPTE_G; 622 for (i = 0; i < pregions_sz; i++) { 623 if ((pa >= pregions[i].mr_start) && 624 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 625 pte_lo &= ~(LPTE_I | LPTE_G); 626 pte_lo |= LPTE_M; 627 break; 628 } 629 } 630 631 return pte_lo; 632} 633 634/* 635 * Quick sort callout for comparing memory regions. 
636 */ 637static int mr_cmp(const void *a, const void *b); 638static int om_cmp(const void *a, const void *b); 639 640static int 641mr_cmp(const void *a, const void *b) 642{ 643 const struct mem_region *regiona; 644 const struct mem_region *regionb; 645 646 regiona = a; 647 regionb = b; 648 if (regiona->mr_start < regionb->mr_start) 649 return (-1); 650 else if (regiona->mr_start > regionb->mr_start) 651 return (1); 652 else 653 return (0); 654} 655 656static int 657om_cmp(const void *a, const void *b) 658{ 659 const struct ofw_map *mapa; 660 const struct ofw_map *mapb; 661 662 mapa = a; 663 mapb = b; 664 if (mapa->om_pa_hi < mapb->om_pa_hi) 665 return (-1); 666 else if (mapa->om_pa_hi > mapb->om_pa_hi) 667 return (1); 668 else if (mapa->om_pa_lo < mapb->om_pa_lo) 669 return (-1); 670 else if (mapa->om_pa_lo > mapb->om_pa_lo) 671 return (1); 672 else 673 return (0); 674} 675 676static void 677moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap) 678{ 679 int i = 0; 680 681 /* 682 * Initialize segment registers and MMU 683 */ 684 685 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 686 for (i = 0; i < 16; i++) { 687 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 688 } 689 __asm __volatile ("ptesync; mtsdr1 %0; isync" 690 :: "r"((u_int)moea64_pteg_table 691 | (32 - cntlzw(moea64_pteg_mask >> 11)))); 692 tlbia(); 693} 694 695static void 696moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 697{ 698 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 699 register_t msr; 700 vm_offset_t off; 701 vm_paddr_t pa_base; 702 int i, ofw_mappings; 703 704 bzero(translations, sz); 705 if (OF_getprop(mmu, "translations", translations, sz) == -1) 706 panic("moea64_bootstrap: can't get ofw translations"); 707 708 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 709 sz /= sizeof(*translations); 710 qsort(translations, sz, sizeof (*translations), om_cmp); 711 712 for (i = 0, ofw_mappings = 0; i < sz; i++) { 713 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 714 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 715 translations[i].om_len); 716 717 if (translations[i].om_pa_lo % PAGE_SIZE) 718 panic("OFW translation not page-aligned!"); 719 720 if (translations[i].om_pa_hi) 721 panic("OFW translations above 32-bit boundary!"); 722 723 pa_base = translations[i].om_pa_lo; 724 725 /* Now enter the pages for this mapping */ 726 727 DISABLE_TRANS(msr); 728 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 729 moea64_kenter(mmup, translations[i].om_va + off, 730 pa_base + off); 731 732 ofw_mappings++; 733 } 734 ENABLE_TRANS(msr); 735 } 736} 737 738static void 739moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 740{ 741 ihandle_t mmui; 742 phandle_t chosen; 743 phandle_t mmu; 744 size_t sz; 745 int i, j; 746 vm_size_t size, physsz, hwphyssz; 747 vm_offset_t pa, va, off; 748 register_t msr; 749 void *dpcpu; 750 751 /* We don't have a direct map since there is no BAT */ 752 hw_direct_map = 0; 753 754 /* Make sure battable is zero, since we have no BAT */ 755 for (i = 0; i < 16; i++) { 756 battable[i].batu = 0; 757 battable[i].batl = 0; 758 } 759 760 /* Get physical memory regions from firmware */ 761 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 762 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 763 764 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 765 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 766 panic("moea64_bootstrap: phys_avail too small"); 767 qsort(regions, regions_sz, sizeof(*regions), 
mr_cmp); 768 phys_avail_count = 0; 769 physsz = 0; 770 hwphyssz = 0; 771 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 772 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 773 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 774 regions[i].mr_start + regions[i].mr_size, 775 regions[i].mr_size); 776 if (hwphyssz != 0 && 777 (physsz + regions[i].mr_size) >= hwphyssz) { 778 if (physsz < hwphyssz) { 779 phys_avail[j] = regions[i].mr_start; 780 phys_avail[j + 1] = regions[i].mr_start + 781 hwphyssz - physsz; 782 physsz = hwphyssz; 783 phys_avail_count++; 784 } 785 break; 786 } 787 phys_avail[j] = regions[i].mr_start; 788 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 789 phys_avail_count++; 790 physsz += regions[i].mr_size; 791 } 792 physmem = btoc(physsz); 793 794 /* 795 * Allocate PTEG table. 796 */ 797#ifdef PTEGCOUNT 798 moea64_pteg_count = PTEGCOUNT; 799#else 800 moea64_pteg_count = 0x1000; 801 802 while (moea64_pteg_count < physmem) 803 moea64_pteg_count <<= 1; 804#endif /* PTEGCOUNT */ 805 806 size = moea64_pteg_count * sizeof(struct lpteg); 807 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 808 moea64_pteg_count, size); 809 810 /* 811 * We now need to allocate memory. This memory, to be allocated, 812 * has to reside in a page table. The page table we are about to 813 * allocate. We don't have BAT. So drop to data real mode for a minute 814 * as a measure of last resort. We do this a couple times. 815 */ 816 817 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size); 818 DISABLE_TRANS(msr); 819 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg)); 820 ENABLE_TRANS(msr); 821 822 moea64_pteg_mask = moea64_pteg_count - 1; 823 824 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 825 826 /* 827 * Allocate pv/overflow lists. 828 */ 829 size = sizeof(struct pvo_head) * moea64_pteg_count; 830 831 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 832 PAGE_SIZE); 833 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 834 835 DISABLE_TRANS(msr); 836 for (i = 0; i < moea64_pteg_count; i++) 837 LIST_INIT(&moea64_pvo_table[i]); 838 ENABLE_TRANS(msr); 839 840 /* 841 * Initialize the lock that synchronizes access to the pteg and pvo 842 * tables. 843 */ 844 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 845 MTX_RECURSE); 846 847 /* 848 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU. 849 */ 850 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN); 851 852 /* 853 * Initialise the unmanaged pvo pool. 854 */ 855 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 856 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 857 moea64_bpvo_pool_index = 0; 858 859 /* 860 * Make sure kernel vsid is allocated as well as VSID 0. 861 */ 862 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 863 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 864 moea64_vsid_bitmap[0] |= 1; 865 866 /* 867 * Initialize the kernel pmap (which is statically allocated). 
868 */ 869 for (i = 0; i < 16; i++) 870 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 871 872 kernel_pmap->pmap_phys = kernel_pmap; 873 kernel_pmap->pm_active = ~0; 874 875 PMAP_LOCK_INIT(kernel_pmap); 876 877 /* 878 * Now map in all the other buffers we allocated earlier 879 */ 880 881 DISABLE_TRANS(msr); 882 size = moea64_pteg_count * sizeof(struct lpteg); 883 off = (vm_offset_t)(moea64_pteg_table); 884 for (pa = off; pa < off + size; pa += PAGE_SIZE) 885 moea64_kenter(mmup, pa, pa); 886 size = sizeof(struct pvo_head) * moea64_pteg_count; 887 off = (vm_offset_t)(moea64_pvo_table); 888 for (pa = off; pa < off + size; pa += PAGE_SIZE) 889 moea64_kenter(mmup, pa, pa); 890 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 891 off = (vm_offset_t)(moea64_bpvo_pool); 892 for (pa = off; pa < off + size; pa += PAGE_SIZE) 893 moea64_kenter(mmup, pa, pa); 894 895 /* 896 * Map certain important things, like ourselves. 897 * 898 * NOTE: We do not map the exception vector space. That code is 899 * used only in real mode, and leaving it unmapped allows us to 900 * catch NULL pointer dereferences, instead of making NULL a valid 901 * address. 902 */ 903 904 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE) 905 moea64_kenter(mmup, pa, pa); 906 ENABLE_TRANS(msr); 907 908 if (!ofw_real_mode) { 909 /* 910 * Set up the Open Firmware pmap and add its mappings. 911 */ 912 913 moea64_pinit(mmup, &ofw_pmap); 914 for (i = 0; i < 16; i++) 915 ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i]; 916 917 if ((chosen = OF_finddevice("/chosen")) == -1) 918 panic("moea64_bootstrap: can't find /chosen"); 919 OF_getprop(chosen, "mmu", &mmui, 4); 920 if ((mmu = OF_instance_to_package(mmui)) == -1) 921 panic("moea64_bootstrap: can't get mmu package"); 922 if ((sz = OF_getproplen(mmu, "translations")) == -1) 923 panic("moea64_bootstrap: can't get ofw translation count"); 924 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 925 panic("moea64_bootstrap: too many ofw translations"); 926 927 moea64_add_ofw_mappings(mmup, mmu, sz); 928 } 929 930#ifdef SMP 931 TLBSYNC(); 932#endif 933 934 /* 935 * Calculate the last available physical address. 936 */ 937 for (i = 0; phys_avail[i + 2] != 0; i += 2) 938 ; 939 Maxmem = powerpc_btop(phys_avail[i + 1]); 940 941 /* 942 * Initialize MMU and remap early physical mappings 943 */ 944 moea64_bridge_cpu_bootstrap(mmup,0); 945 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 946 pmap_bootstrapped++; 947 bs_remap_earlyboot(); 948 949 /* 950 * Set the start and end of kva. 951 */ 952 virtual_avail = VM_MIN_KERNEL_ADDRESS; 953 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 954 955 /* 956 * Figure out how far we can extend virtual_end into segment 16 957 * without running into existing mappings. Segment 16 is guaranteed 958 * to contain neither RAM nor devices (at least on Apple hardware), 959 * but will generally contain some OFW mappings we should not 960 * step on. 961 */ 962 963 PMAP_LOCK(kernel_pmap); 964 while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL) 965 virtual_end += PAGE_SIZE; 966 PMAP_UNLOCK(kernel_pmap); 967 968 /* 969 * Allocate some things for page zeroing. We put this directly 970 * in the page table, marked with LPTE_LOCKED, to avoid any 971 * of the PVO book-keeping or other parts of the VM system 972 * from even knowing that this hack exists.
973 */ 974 975 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); 976 for (i = 0; i < 2; i++) { 977 struct lpte pt; 978 uint64_t vsid; 979 int pteidx, ptegidx; 980 981 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 982 virtual_end -= PAGE_SIZE; 983 984 LOCK_TABLE(); 985 986 vsid = va_to_vsid(kernel_pmap, moea64_scratchpage_va[i]); 987 moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i], 988 LPTE_NOEXEC); 989 pt.pte_hi |= LPTE_LOCKED; 990 991 ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i]); 992 pteidx = moea64_pte_insert(ptegidx, &pt); 993 if (pt.pte_hi & LPTE_HID) 994 ptegidx ^= moea64_pteg_mask; 995 996 moea64_scratchpage_pte[i] = 997 &moea64_pteg_table[ptegidx].pt[pteidx]; 998 999 UNLOCK_TABLE(); 1000 } 1001 1002 /* 1003 * Allocate a kernel stack with a guard page for thread0 and map it 1004 * into the kernel page map. 1005 */ 1006 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 1007 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1008 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 1009 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 1010 thread0.td_kstack = va; 1011 thread0.td_kstack_pages = KSTACK_PAGES; 1012 for (i = 0; i < KSTACK_PAGES; i++) { 1013 moea64_kenter(mmup, va, pa); 1014 pa += PAGE_SIZE; 1015 va += PAGE_SIZE; 1016 } 1017 1018 /* 1019 * Allocate virtual address space for the message buffer. 1020 */ 1021 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1022 msgbufp = (struct msgbuf *)virtual_avail; 1023 va = virtual_avail; 1024 virtual_avail += round_page(MSGBUF_SIZE); 1025 while (va < virtual_avail) { 1026 moea64_kenter(mmup, va, pa); 1027 pa += PAGE_SIZE; 1028 va += PAGE_SIZE; 1029 } 1030 1031 /* 1032 * Allocate virtual address space for the dynamic percpu area. 1033 */ 1034 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1035 dpcpu = (void *)virtual_avail; 1036 virtual_avail += DPCPU_SIZE; 1037 while (va < virtual_avail) { 1038 moea64_kenter(mmup, va, pa); 1039 pa += PAGE_SIZE; 1040 va += PAGE_SIZE; 1041 } 1042 dpcpu_init(dpcpu, 0); 1043} 1044 1045/* 1046 * Activate a user pmap. The pmap must be activated before it's address 1047 * space can be accessed in any way. 1048 */ 1049void 1050moea64_activate(mmu_t mmu, struct thread *td) 1051{ 1052 pmap_t pm, pmr; 1053 1054 /* 1055 * Load all the data we need up front to encourage the compiler to 1056 * not issue any loads while we have interrupts disabled below. 1057 */ 1058 pm = &td->td_proc->p_vmspace->vm_pmap; 1059 pmr = pm->pmap_phys; 1060 1061 pm->pm_active |= PCPU_GET(cpumask); 1062 PCPU_SET(curpmap, pmr); 1063} 1064 1065void 1066moea64_deactivate(mmu_t mmu, struct thread *td) 1067{ 1068 pmap_t pm; 1069 1070 pm = &td->td_proc->p_vmspace->vm_pmap; 1071 pm->pm_active &= ~(PCPU_GET(cpumask)); 1072 PCPU_SET(curpmap, NULL); 1073} 1074 1075void 1076moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1077{ 1078 struct pvo_entry *pvo; 1079 1080 PMAP_LOCK(pm); 1081 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1082 1083 if (pvo != NULL) { 1084 if (wired) { 1085 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1086 pm->pm_stats.wired_count++; 1087 pvo->pvo_vaddr |= PVO_WIRED; 1088 } else { 1089 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1090 pm->pm_stats.wired_count--; 1091 pvo->pvo_vaddr &= ~PVO_WIRED; 1092 } 1093 } 1094 PMAP_UNLOCK(pm); 1095} 1096 1097/* 1098 * This goes through and sets the physical address of our 1099 * special scratch PTE to the PA we want to zero or copy. 
Because 1100 * of locking issues (this can get called in pvo_enter() by 1101 * the UMA allocator), we can't use most other utility functions here 1102 */ 1103 1104static __inline 1105void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1106 1107 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1108 1109 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1110 TLBIE(kernel_pmap, moea64_scratchpage_va[which]); 1111 1112 moea64_scratchpage_pte[which]->pte_lo &= 1113 ~(LPTE_WIMG | LPTE_RPGN); 1114 moea64_scratchpage_pte[which]->pte_lo |= 1115 moea64_calc_wimg(pa) | (uint64_t)pa; 1116 EIEIO(); 1117 1118 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1119 PTESYNC(); isync(); 1120} 1121 1122void 1123moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1124{ 1125 vm_offset_t dst; 1126 vm_offset_t src; 1127 1128 dst = VM_PAGE_TO_PHYS(mdst); 1129 src = VM_PAGE_TO_PHYS(msrc); 1130 1131 mtx_lock(&moea64_scratchpage_mtx); 1132 1133 moea64_set_scratchpage_pa(0,src); 1134 moea64_set_scratchpage_pa(1,dst); 1135 1136 kcopy((void *)moea64_scratchpage_va[0], 1137 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1138 1139 mtx_unlock(&moea64_scratchpage_mtx); 1140} 1141 1142void 1143moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1144{ 1145 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1146 1147 if (!moea64_initialized) 1148 panic("moea64_zero_page: can't zero pa %#x", pa); 1149 if (size + off > PAGE_SIZE) 1150 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1151 1152 mtx_lock(&moea64_scratchpage_mtx); 1153 1154 moea64_set_scratchpage_pa(0,pa); 1155 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1156 mtx_unlock(&moea64_scratchpage_mtx); 1157} 1158 1159/* 1160 * Zero a page of physical memory by temporarily mapping it 1161 */ 1162void 1163moea64_zero_page(mmu_t mmu, vm_page_t m) 1164{ 1165 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1166 vm_offset_t off; 1167 1168 if (!moea64_initialized) 1169 panic("moea64_zero_page: can't zero pa %#x", pa); 1170 1171 mtx_lock(&moea64_scratchpage_mtx); 1172 1173 moea64_set_scratchpage_pa(0,pa); 1174 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1175 __asm __volatile("dcbz 0,%0" :: 1176 "r"(moea64_scratchpage_va[0] + off)); 1177 mtx_unlock(&moea64_scratchpage_mtx); 1178} 1179 1180void 1181moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1182{ 1183 1184 moea64_zero_page(mmu, m); 1185} 1186 1187/* 1188 * Map the given physical page at the specified virtual address in the 1189 * target pmap with the protection requested. If specified the page 1190 * will be wired down. 1191 */ 1192void 1193moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1194 vm_prot_t prot, boolean_t wired) 1195{ 1196 1197 vm_page_lock_queues(); 1198 PMAP_LOCK(pmap); 1199 moea64_enter_locked(pmap, va, m, prot, wired); 1200 vm_page_unlock_queues(); 1201 PMAP_UNLOCK(pmap); 1202} 1203 1204/* 1205 * Map the given physical page at the specified virtual address in the 1206 * target pmap with the protection requested. If specified the page 1207 * will be wired down. 1208 * 1209 * The page queues and pmap must be locked. 
1210 */ 1211 1212static void 1213moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1214 boolean_t wired) 1215{ 1216 struct pvo_head *pvo_head; 1217 uma_zone_t zone; 1218 vm_page_t pg; 1219 uint64_t pte_lo; 1220 u_int pvo_flags; 1221 int error; 1222 1223 if (!moea64_initialized) { 1224 pvo_head = &moea64_pvo_kunmanaged; 1225 pg = NULL; 1226 zone = moea64_upvo_zone; 1227 pvo_flags = 0; 1228 } else { 1229 pvo_head = vm_page_to_pvoh(m); 1230 pg = m; 1231 zone = moea64_mpvo_zone; 1232 pvo_flags = PVO_MANAGED; 1233 } 1234 1235 if (pmap_bootstrapped) 1236 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1237 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1238 KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1239 ("moea64_enter_locked: page %p is not busy", m)); 1240 1241 /* XXX change the pvo head for fake pages */ 1242 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1243 pvo_flags &= ~PVO_MANAGED; 1244 pvo_head = &moea64_pvo_kunmanaged; 1245 zone = moea64_upvo_zone; 1246 } 1247 1248 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1249 1250 if (prot & VM_PROT_WRITE) { 1251 pte_lo |= LPTE_BW; 1252 if (pmap_bootstrapped) 1253 vm_page_flag_set(m, PG_WRITEABLE); 1254 } else 1255 pte_lo |= LPTE_BR; 1256 1257 if (prot & VM_PROT_EXECUTE) 1258 pvo_flags |= VM_PROT_EXECUTE; 1259 1260 if (wired) 1261 pvo_flags |= PVO_WIRED; 1262 1263 if ((m->flags & PG_FICTITIOUS) != 0) 1264 pvo_flags |= PVO_FAKE; 1265 1266 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1267 pte_lo, pvo_flags); 1268 1269 /* 1270 * Flush the page from the instruction cache if this page is 1271 * mapped executable and cacheable. 1272 */ 1273 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1274 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1275 } 1276} 1277 1278static void 1279moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1280{ 1281 1282 /* 1283 * This is much trickier than on older systems because 1284 * we can't sync the icache on physical addresses directly 1285 * without a direct map. Instead we check a couple of cases 1286 * where the memory is already mapped in and, failing that, 1287 * use the same trick we use for page zeroing to create 1288 * a temporary mapping for this physical address. 1289 */ 1290 1291 if (!pmap_bootstrapped) { 1292 /* 1293 * If PMAP is not bootstrapped, we are likely to be 1294 * in real mode. 1295 */ 1296 __syncicache((void *)pa, sz); 1297 } else if (pmap == kernel_pmap) { 1298 __syncicache((void *)va, sz); 1299 } else { 1300 /* Use the scratch page to set up a temp mapping */ 1301 1302 mtx_lock(&moea64_scratchpage_mtx); 1303 1304 moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1305 __syncicache((void *)(moea64_scratchpage_va[1] + 1306 (va & ADDR_POFF)), sz); 1307 1308 mtx_unlock(&moea64_scratchpage_mtx); 1309 } 1310} 1311 1312/* 1313 * Maps a sequence of resident pages belonging to the same object. 1314 * The sequence begins with the given page m_start. This page is 1315 * mapped at the given virtual address start. Each subsequent page is 1316 * mapped at a virtual address that is offset from start by the same 1317 * amount as the page is offset from m_start within the object. The 1318 * last page in the sequence is the page with the largest offset from 1319 * m_start that can be mapped at a virtual address less than the given 1320 * virtual address end. 
Not every virtual page between start and end 1321 * is mapped; only those for which a resident page exists with the 1322 * corresponding offset from m_start are mapped. 1323 */ 1324void 1325moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1326 vm_page_t m_start, vm_prot_t prot) 1327{ 1328 vm_page_t m; 1329 vm_pindex_t diff, psize; 1330 1331 psize = atop(end - start); 1332 m = m_start; 1333 vm_page_lock_queues(); 1334 PMAP_LOCK(pm); 1335 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1336 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1337 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1338 m = TAILQ_NEXT(m, listq); 1339 } 1340 vm_page_unlock_queues(); 1341 PMAP_UNLOCK(pm); 1342} 1343 1344void 1345moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1346 vm_prot_t prot) 1347{ 1348 1349 vm_page_lock_queues(); 1350 PMAP_LOCK(pm); 1351 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1352 FALSE); 1353 vm_page_unlock_queues(); 1354 PMAP_UNLOCK(pm); 1355} 1356 1357vm_paddr_t 1358moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1359{ 1360 struct pvo_entry *pvo; 1361 vm_paddr_t pa; 1362 1363 PMAP_LOCK(pm); 1364 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1365 if (pvo == NULL) 1366 pa = 0; 1367 else 1368 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1369 PMAP_UNLOCK(pm); 1370 return (pa); 1371} 1372 1373/* 1374 * Atomically extract and hold the physical page with the given 1375 * pmap and virtual address pair if that mapping permits the given 1376 * protection. 1377 */ 1378vm_page_t 1379moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1380{ 1381 struct pvo_entry *pvo; 1382 vm_page_t m; 1383 vm_paddr_t pa; 1384 1385 m = NULL; 1386 pa = 0; 1387 PMAP_LOCK(pmap); 1388retry: 1389 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1390 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1391 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1392 (prot & VM_PROT_WRITE) == 0)) { 1393 if (vm_page_pa_tryrelock(pmap, 1394 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1395 goto retry; 1396 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1397 vm_page_hold(m); 1398 } 1399 PA_UNLOCK_COND(pa); 1400 PMAP_UNLOCK(pmap); 1401 return (m); 1402} 1403 1404static void * 1405moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1406{ 1407 /* 1408 * This entire routine is a horrible hack to avoid bothering kmem 1409 * for new KVA addresses. Because this can get called from inside 1410 * kmem allocation routines, calling kmem for a new address here 1411 * can lead to multiply locking non-recursive mutexes. 
1412 */ 1413 static vm_pindex_t color; 1414 vm_offset_t va; 1415 1416 vm_page_t m; 1417 int pflags, needed_lock; 1418 1419 *flags = UMA_SLAB_PRIV; 1420 needed_lock = !PMAP_LOCKED(kernel_pmap); 1421 1422 if (needed_lock) 1423 PMAP_LOCK(kernel_pmap); 1424 1425 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1426 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1427 else 1428 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1429 if (wait & M_ZERO) 1430 pflags |= VM_ALLOC_ZERO; 1431 1432 for (;;) { 1433 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1434 if (m == NULL) { 1435 if (wait & M_NOWAIT) 1436 return (NULL); 1437 VM_WAIT; 1438 } else 1439 break; 1440 } 1441 1442 va = VM_PAGE_TO_PHYS(m); 1443 1444 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1445 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1446 PVO_WIRED | PVO_BOOTSTRAP); 1447 1448 if (needed_lock) 1449 PMAP_UNLOCK(kernel_pmap); 1450 1451 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1452 bzero((void *)va, PAGE_SIZE); 1453 1454 return (void *)va; 1455} 1456 1457void 1458moea64_init(mmu_t mmu) 1459{ 1460 1461 CTR0(KTR_PMAP, "moea64_init"); 1462 1463 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1464 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1465 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1466 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1467 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1468 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1469 1470 if (!hw_direct_map) { 1471 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1472 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1473 } 1474 1475 moea64_initialized = TRUE; 1476} 1477 1478boolean_t 1479moea64_is_referenced(mmu_t mmu, vm_page_t m) 1480{ 1481 1482 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1483 ("moea64_is_referenced: page %p is not managed", m)); 1484 return (moea64_query_bit(m, PTE_REF)); 1485} 1486 1487boolean_t 1488moea64_is_modified(mmu_t mmu, vm_page_t m) 1489{ 1490 1491 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1492 ("moea64_is_modified: page %p is not managed", m)); 1493 1494 /* 1495 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1496 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1497 * is clear, no PTEs can have LPTE_CHG set. 1498 */ 1499 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1500 if ((m->oflags & VPO_BUSY) == 0 && 1501 (m->flags & PG_WRITEABLE) == 0) 1502 return (FALSE); 1503 return (moea64_query_bit(m, LPTE_CHG)); 1504} 1505 1506void 1507moea64_clear_reference(mmu_t mmu, vm_page_t m) 1508{ 1509 1510 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1511 ("moea64_clear_reference: page %p is not managed", m)); 1512 vm_page_lock_queues(); 1513 moea64_clear_bit(m, LPTE_REF, NULL); 1514 vm_page_unlock_queues(); 1515} 1516 1517void 1518moea64_clear_modify(mmu_t mmu, vm_page_t m) 1519{ 1520 1521 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1522 ("moea64_clear_modify: page %p is not managed", m)); 1523 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1524 KASSERT((m->oflags & VPO_BUSY) == 0, 1525 ("moea64_clear_modify: page %p is busy", m)); 1526 1527 /* 1528 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG 1529 * set. If the object containing the page is locked and the page is 1530 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 
1531 */ 1532 if ((m->flags & PG_WRITEABLE) == 0) 1533 return; 1534 vm_page_lock_queues(); 1535 moea64_clear_bit(m, LPTE_CHG, NULL); 1536 vm_page_unlock_queues(); 1537} 1538 1539/* 1540 * Clear the write and modified bits in each of the given page's mappings. 1541 */ 1542void 1543moea64_remove_write(mmu_t mmu, vm_page_t m) 1544{ 1545 struct pvo_entry *pvo; 1546 struct lpte *pt; 1547 pmap_t pmap; 1548 uint64_t lo; 1549 1550 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1551 ("moea64_remove_write: page %p is not managed", m)); 1552 1553 /* 1554 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1555 * another thread while the object is locked. Thus, if PG_WRITEABLE 1556 * is clear, no page table entries need updating. 1557 */ 1558 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1559 if ((m->oflags & VPO_BUSY) == 0 && 1560 (m->flags & PG_WRITEABLE) == 0) 1561 return; 1562 vm_page_lock_queues(); 1563 lo = moea64_attr_fetch(m); 1564 SYNC(); 1565 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1566 pmap = pvo->pvo_pmap; 1567 PMAP_LOCK(pmap); 1568 LOCK_TABLE(); 1569 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1570 pt = moea64_pvo_to_pte(pvo, -1); 1571 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1572 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1573 if (pt != NULL) { 1574 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1575 lo |= pvo->pvo_pte.lpte.pte_lo; 1576 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1577 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1578 pvo->pvo_pmap, PVO_VADDR(pvo)); 1579 } 1580 } 1581 UNLOCK_TABLE(); 1582 PMAP_UNLOCK(pmap); 1583 } 1584 if ((lo & LPTE_CHG) != 0) { 1585 moea64_attr_clear(m, LPTE_CHG); 1586 vm_page_dirty(m); 1587 } 1588 vm_page_flag_clear(m, PG_WRITEABLE); 1589 vm_page_unlock_queues(); 1590} 1591 1592/* 1593 * moea64_ts_referenced: 1594 * 1595 * Return a count of reference bits for a page, clearing those bits. 1596 * It is not necessary for every reference bit to be cleared, but it 1597 * is necessary that 0 only be returned when there are truly no 1598 * reference bits set. 1599 * 1600 * XXX: The exact number of bits to check and clear is a matter that 1601 * should be tested and standardized at some point in the future for 1602 * optimal aging of shared pages. 1603 */ 1604boolean_t 1605moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1606{ 1607 int count; 1608 1609 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1610 return (0); 1611 1612 count = moea64_clear_bit(m, LPTE_REF, NULL); 1613 1614 return (count); 1615} 1616 1617/* 1618 * Map a wired page into kernel virtual address space. 1619 */ 1620void 1621moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1622{ 1623 uint64_t pte_lo; 1624 int error; 1625 1626#if 0 1627 if (!pmap_bootstrapped) { 1628 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1629 panic("Trying to enter an address in KVA -- %#x!\n",pa); 1630 } 1631#endif 1632 1633 pte_lo = moea64_calc_wimg(pa); 1634 1635 PMAP_LOCK(kernel_pmap); 1636 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1637 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1638 PVO_WIRED | VM_PROT_EXECUTE); 1639 1640 if (error != 0 && error != ENOENT) 1641 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va, 1642 pa, error); 1643 1644 /* 1645 * Flush the memory from the instruction cache. 1646 */ 1647 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1648 __syncicache((void *)va, PAGE_SIZE); 1649 } 1650 PMAP_UNLOCK(kernel_pmap); 1651} 1652 1653/* 1654 * Extract the physical page address associated with the given kernel virtual 1655 * address. 
1656 */ 1657vm_offset_t 1658moea64_kextract(mmu_t mmu, vm_offset_t va) 1659{ 1660 struct pvo_entry *pvo; 1661 vm_paddr_t pa; 1662 1663 /* 1664 * Shortcut the direct-mapped case when applicable. We never put 1665 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1666 */ 1667 if (va < VM_MIN_KERNEL_ADDRESS) 1668 return (va); 1669 1670 PMAP_LOCK(kernel_pmap); 1671 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1672 KASSERT(pvo != NULL, ("moea64_kextract: no addr found")); 1673 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1674 PMAP_UNLOCK(kernel_pmap); 1675 return (pa); 1676} 1677 1678/* 1679 * Remove a wired page from kernel virtual address space. 1680 */ 1681void 1682moea64_kremove(mmu_t mmu, vm_offset_t va) 1683{ 1684 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1685} 1686 1687/* 1688 * Map a range of physical addresses into kernel virtual address space. 1689 * 1690 * The value passed in *virt is a suggested virtual address for the mapping. 1691 * Architectures which can support a direct-mapped physical to virtual region 1692 * can return the appropriate address within that region, leaving '*virt' 1693 * unchanged. We cannot and therefore do not; *virt is updated with the 1694 * first usable address after the mapped region. 1695 */ 1696vm_offset_t 1697moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1698 vm_offset_t pa_end, int prot) 1699{ 1700 vm_offset_t sva, va; 1701 1702 sva = *virt; 1703 va = sva; 1704 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1705 moea64_kenter(mmu, va, pa_start); 1706 *virt = va; 1707 1708 return (sva); 1709} 1710 1711/* 1712 * Returns true if the pmap's pv is one of the first 1713 * 16 pvs linked to from this page. This count may 1714 * be changed upwards or downwards in the future; it 1715 * is only necessary that true be returned for a small 1716 * subset of pmaps for proper page aging. 1717 */ 1718boolean_t 1719moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1720{ 1721 int loops; 1722 struct pvo_entry *pvo; 1723 1724 if (!moea64_initialized || (m->flags & PG_FICTITIOUS)) 1725 return FALSE; 1726 1727 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1728 1729 loops = 0; 1730 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1731 if (pvo->pvo_pmap == pmap) 1732 return (TRUE); 1733 if (++loops >= 16) 1734 break; 1735 } 1736 1737 return (FALSE); 1738} 1739 1740/* 1741 * Return the number of managed mappings to the given physical page 1742 * that are wired. 1743 */ 1744int 1745moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1746{ 1747 struct pvo_entry *pvo; 1748 int count; 1749 1750 count = 0; 1751 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0) 1752 return (count); 1753 vm_page_lock_queues(); 1754 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1755 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1756 count++; 1757 vm_page_unlock_queues(); 1758 return (count); 1759} 1760 1761static u_int moea64_vsidcontext; 1762 1763void 1764moea64_pinit(mmu_t mmu, pmap_t pmap) 1765{ 1766 int i, mask; 1767 u_int entropy; 1768 1769 PMAP_LOCK_INIT(pmap); 1770 1771 entropy = 0; 1772 __asm __volatile("mftb %0" : "=r"(entropy)); 1773 1774 if (pmap_bootstrapped) 1775 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); 1776 else 1777 pmap->pmap_phys = pmap; 1778 1779 /* 1780 * Allocate some segment registers for this pmap. 
1781 */ 1782 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1783 u_int hash, n; 1784 1785 /* 1786 * Create a new value by multiplying by a prime and adding in 1787 * entropy from the timebase register. This is to make the 1788 * VSID more random so that the PT hash function collides 1789 * less often. (Note that the prime causes gcc to do shifts 1790 * instead of a multiply.) 1791 */ 1792 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1793 hash = moea64_vsidcontext & (NPMAPS - 1); 1794 if (hash == 0) /* 0 is special, avoid it */ 1795 continue; 1796 n = hash >> 5; 1797 mask = 1 << (hash & (VSID_NBPW - 1)); 1798 hash = (moea64_vsidcontext & 0xfffff); 1799 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1800 /* anything free in this bucket? */ 1801 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1802 entropy = (moea64_vsidcontext >> 20); 1803 continue; 1804 } 1805 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1806 mask = 1 << i; 1807 hash &= 0xfffff & ~(VSID_NBPW - 1); 1808 hash |= i; 1809 } 1810 moea64_vsid_bitmap[n] |= mask; 1811 for (i = 0; i < 16; i++) { 1812 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1813 } 1814 return; 1815 } 1816 1817 panic("moea64_pinit: out of segments"); 1818} 1819 1820/* 1821 * Initialize the pmap associated with process 0. 1822 */ 1823void 1824moea64_pinit0(mmu_t mmu, pmap_t pm) 1825{ 1826 moea64_pinit(mmu, pm); 1827 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1828} 1829 1830/* 1831 * Set the physical protection on the specified range of this map as requested. 1832 */ 1833void 1834moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1835 vm_prot_t prot) 1836{ 1837 struct pvo_entry *pvo; 1838 struct lpte *pt; 1839 int pteidx; 1840 1841 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1842 eva, prot); 1843 1844 1845 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1846 ("moea64_protect: non current pmap")); 1847 1848 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1849 moea64_remove(mmu, pm, sva, eva); 1850 return; 1851 } 1852 1853 vm_page_lock_queues(); 1854 PMAP_LOCK(pm); 1855 for (; sva < eva; sva += PAGE_SIZE) { 1856 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1857 if (pvo == NULL) 1858 continue; 1859 1860 /* 1861 * Grab the PTE pointer before we diddle with the cached PTE 1862 * copy. 1863 */ 1864 LOCK_TABLE(); 1865 pt = moea64_pvo_to_pte(pvo, pteidx); 1866 1867 /* 1868 * Change the protection of the page. 1869 */ 1870 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1871 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1872 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1873 if ((prot & VM_PROT_EXECUTE) == 0) 1874 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1875 1876 /* 1877 * If the PVO is in the page table, update that pte as well. 1878 */ 1879 if (pt != NULL) { 1880 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1881 pvo->pvo_pmap, PVO_VADDR(pvo)); 1882 if ((pvo->pvo_pte.lpte.pte_lo & 1883 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1884 moea64_syncicache(pm, sva, 1885 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1886 PAGE_SIZE); 1887 } 1888 } 1889 UNLOCK_TABLE(); 1890 } 1891 vm_page_unlock_queues(); 1892 PMAP_UNLOCK(pm); 1893} 1894 1895/* 1896 * Map a list of wired pages into kernel virtual address space. This is 1897 * intended for temporary mappings which do not need page modification or 1898 * references recorded. Existing mappings in the region are overwritten.
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
        while (count-- > 0) {
                moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
                va += PAGE_SIZE;
                m++;
        }
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
        while (count-- > 0) {
                moea64_kremove(mmu, va);
                va += PAGE_SIZE;
        }
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{
        int idx, mask;

        /*
         * Free the VSID backing this pmap's segment registers.
         */
        if (pmap->pm_sr[0] == 0)
                panic("moea64_release");

        idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
        mask = 1 << (idx % VSID_NBPW);
        idx /= VSID_NBPW;
        moea64_vsid_bitmap[idx] &= ~mask;
        PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
        struct  pvo_entry *pvo;
        int     pteidx;

        vm_page_lock_queues();
        PMAP_LOCK(pm);
        for (; sva < eva; sva += PAGE_SIZE) {
                pvo = moea64_pvo_find_va(pm, sva, &pteidx);
                if (pvo != NULL) {
                        moea64_pvo_remove(pvo, pteidx);
                }
        }
        vm_page_unlock_queues();
        PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
        struct  pvo_head *pvo_head;
        struct  pvo_entry *pvo, *next_pvo;
        pmap_t  pmap;

        vm_page_lock_queues();
        pvo_head = vm_page_to_pvoh(m);
        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                next_pvo = LIST_NEXT(pvo, pvo_vlink);

                MOEA_PVO_CHECK(pvo);    /* sanity check */
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
                moea64_pvo_remove(pvo, -1);
                PMAP_UNLOCK(pmap);
        }
        if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
                moea64_attr_clear(m, LPTE_CHG);
                vm_page_dirty(m);
        }
        vm_page_flag_clear(m, PG_WRITEABLE);
        vm_page_unlock_queues();
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
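/*
 * A worked example of how the allocator below carves memory out of
 * phys_avail[] (the values are hypothetical): with a region
 * phys_avail[i] = 0x100000, phys_avail[i + 1] = 0x800000 and a request of
 * size 0x4000 with align 0x10000, the aligned start s = 0x100000 equals the
 * region start, so the allocation is shaved off the front and phys_avail[i]
 * becomes 0x104000.  Had the allocation landed in the middle of the region,
 * the final else branch would split the region in two by shifting the
 * remaining pairs up and inserting a new start/end pair.
 */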
static vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
        vm_offset_t     s, e;
        int             i, j;

        size = round_page(size);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (align != 0)
                        s = (phys_avail[i] + align - 1) & ~(align - 1);
                else
                        s = phys_avail[i];
                e = s + size;

                if (s < phys_avail[i] || e > phys_avail[i + 1])
                        continue;

                if (s == phys_avail[i]) {
                        phys_avail[i] += size;
                } else if (e == phys_avail[i + 1]) {
                        phys_avail[i + 1] -= size;
                } else {
                        for (j = phys_avail_count * 2; j > i; j -= 2) {
                                phys_avail[j] = phys_avail[j - 2];
                                phys_avail[j + 1] = phys_avail[j - 1];
                        }

                        phys_avail[i + 3] = phys_avail[i + 1];
                        phys_avail[i + 1] = s;
                        phys_avail[i + 2] = e;
                        phys_avail_count++;
                }

                return (s);
        }
        panic("moea64_bootstrap_alloc: could not allocate memory");
}

static void
tlbia(void)
{
        vm_offset_t i;
        register_t msr, scratch;

        for (i = 0; i < 0xFF000; i += 0x00001000) {
                __asm __volatile("\
                    mfmsr %0; \
                    mr %1, %0; \
                    insrdi %1,%3,1,0; \
                    mtmsrd %1; \
                    ptesync; \
                    \
                    tlbiel %2; \
                    \
                    mtmsrd %0; \
                    eieio; \
                    tlbsync; \
                    ptesync;"
                    : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
        }
}

static int
moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
{
        struct  pvo_entry *pvo;
        uint64_t vsid;
        int     first;
        u_int   ptegidx;
        int     i;
        int     bootstrap;

        /*
         * One nasty thing that can happen here is that the UMA calls to
         * allocate new PVOs need to map more memory, which calls pvo_enter(),
         * which calls UMA...
         *
         * We break the loop by detecting recursion and allocating out of
         * the bootstrap pool.
         */

        moea64_pvo_enter_calls++;
        first = 0;
        bootstrap = (flags & PVO_BOOTSTRAP);

        if (!moea64_initialized)
                bootstrap = 1;

        /*
         * Compute the PTE Group index.
         */
        va &= ~ADDR_POFF;
        vsid = va_to_vsid(pm, va);
        ptegidx = va_to_pteg(vsid, va);

        /*
         * Remove any existing mapping for this page.  Reuse the pvo entry if
         * there is a mapping.
         */
        LOCK_TABLE();

        LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
                if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
                        if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
                            (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
                            (pte_lo & LPTE_PP)) {
                                UNLOCK_TABLE();
                                return (0);
                        }
                        moea64_pvo_remove(pvo, -1);
                        break;
                }
        }

        /*
         * If we aren't overwriting a mapping, try to allocate.
         */
        if (bootstrap) {
                if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
                        panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
                            moea64_bpvo_pool_index, BPVO_POOL_SIZE,
                            BPVO_POOL_SIZE * sizeof(struct pvo_entry));
                }
                pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
                moea64_bpvo_pool_index++;
                bootstrap = 1;
        } else {
                /*
                 * Note: drop the table lock around the UMA allocation in
                 * case the UMA allocator needs to manipulate the page
                 * table.  The mapping we are working with is already
                 * protected by the PMAP lock.
                 */
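                /*
                 * The allocation below uses M_NOWAIT because the pmap lock
                 * (a mutex) is held here and sleeping is therefore not an
                 * option; if UMA cannot satisfy the request, the function
                 * simply fails with ENOMEM a few lines further down.
                 */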
                UNLOCK_TABLE();
                pvo = uma_zalloc(zone, M_NOWAIT);
                LOCK_TABLE();
        }

        if (pvo == NULL) {
                UNLOCK_TABLE();
                return (ENOMEM);
        }

        moea64_pvo_entries++;
        pvo->pvo_vaddr = va;
        pvo->pvo_pmap = pm;
        LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
        pvo->pvo_vaddr &= ~ADDR_POFF;

        if (!(flags & VM_PROT_EXECUTE))
                pte_lo |= LPTE_NOEXEC;
        if (flags & PVO_WIRED)
                pvo->pvo_vaddr |= PVO_WIRED;
        if (pvo_head != &moea64_pvo_kunmanaged)
                pvo->pvo_vaddr |= PVO_MANAGED;
        if (bootstrap)
                pvo->pvo_vaddr |= PVO_BOOTSTRAP;
        if (flags & PVO_FAKE)
                pvo->pvo_vaddr |= PVO_FAKE;

        moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
            (uint64_t)(pa) | pte_lo);

        /*
         * Remember if the list was empty and therefore will be the first
         * item.
         */
        if (LIST_FIRST(pvo_head) == NULL)
                first = 1;
        LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

        if (pvo->pvo_vaddr & PVO_WIRED)
                pm->pm_stats.wired_count++;
        pm->pm_stats.resident_count++;

        /*
         * We hope this succeeds but it isn't required.
         */
        i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
        if (i >= 0) {
                PVO_PTEGIDX_SET(pvo, i);
        } else {
                panic("moea64_pvo_enter: overflow");
                moea64_pte_overflow++;
        }

        if (pm == kernel_pmap)
                isync();

        UNLOCK_TABLE();

        return (first ? ENOENT : 0);
}

static void
moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
        struct  lpte *pt;

        /*
         * If there is an active pte entry, we need to deactivate it (and
         * save the ref & chg bits).
         */
        LOCK_TABLE();
        pt = moea64_pvo_to_pte(pvo, pteidx);
        if (pt != NULL) {
                moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
                    PVO_VADDR(pvo));
                PVO_PTEGIDX_CLR(pvo);
        } else {
                moea64_pte_overflow--;
        }

        /*
         * Update our statistics.
         */
        pvo->pvo_pmap->pm_stats.resident_count--;
        if (pvo->pvo_vaddr & PVO_WIRED)
                pvo->pvo_pmap->pm_stats.wired_count--;

        /*
         * Save the REF/CHG bits into their cache if the page is managed.
         */
        if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
                struct  vm_page *pg;

                pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
                if (pg != NULL) {
                        moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
                            (LPTE_REF | LPTE_CHG));
                }
        }

        /*
         * Remove this PVO from the PV list.
         */
        LIST_REMOVE(pvo, pvo_vlink);

        /*
         * Remove this from the overflow list and return it to the pool
         * if we aren't going to reuse it.
         */
        LIST_REMOVE(pvo, pvo_olink);
        UNLOCK_TABLE();

        if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
                uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
                    moea64_upvo_zone, pvo);

        moea64_pvo_entries--;
        moea64_pvo_remove_calls++;
}

static __inline int
moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{

        /*
         * We can find the actual pte entry without searching by grabbing
         * the PTEG index from 3 unused bits in pvo_vaddr and by
         * noticing the HID bit.
         */
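        /*
         * For example (hypothetical values): with PVO_PTEGIDX_GET(pvo) == 3,
         * ptegidx == 5 and LPTE_HID clear, the PTE lives in slot 3 of PTEG 5,
         * i.e. at index (5 << 3) | 3 == 43 of the page table.  If the entry
         * had been installed via the secondary hash (LPTE_HID set), the PTEG
         * index is first complemented with moea64_pteg_mask before the slot
         * number is appended.
         */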
        if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
                ptegidx ^= moea64_pteg_mask;

        return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
        struct  pvo_entry *pvo;
        int     ptegidx;
        uint64_t vsid;

        va &= ~ADDR_POFF;
        vsid = va_to_vsid(pm, va);
        ptegidx = va_to_pteg(vsid, va);

        LOCK_TABLE();
        LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
                if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
                        if (pteidx_p)
                                *pteidx_p = moea64_pvo_pte_index(pvo, ptegidx);
                        break;
                }
        }
        UNLOCK_TABLE();

        return (pvo);
}

static struct lpte *
moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
        struct lpte *pt;

        /*
         * If we haven't been supplied the ptegidx, calculate it.
         */
        if (pteidx == -1) {
                int             ptegidx;
                uint64_t        vsid;

                vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo));
                ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo));
                pteidx = moea64_pvo_pte_index(pvo, ptegidx);
        }

        pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];

        if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
            !PVO_PTEGIDX_ISSET(pvo)) {
                panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
                    "valid pte index", pvo);
        }

        if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
            PVO_PTEGIDX_ISSET(pvo)) {
                panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
                    "but no valid pte", pvo);
        }

        if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
            LPTE_VALID) {
                if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
                        panic("moea64_pvo_to_pte: pvo %p has valid pte in "
                            "moea64_pteg_table %p but invalid in pvo", pvo, pt);
                }

                if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
                    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
                        panic("moea64_pvo_to_pte: pvo %p pte does not match "
                            "pte %p in moea64_pteg_table difference is %#x",
                            pvo, pt,
                            (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
                }

                ASSERT_TABLE_LOCK();
                return (pt);
        }

        if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
                panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
                    "moea64_pteg_table but valid in pvo", pvo, pt);
        }

        return (NULL);
}

static int
moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
{
        struct  lpte *pt;
        int     i;

        ASSERT_TABLE_LOCK();

        /*
         * First try primary hash.
         */
        for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                if ((pt->pte_hi & LPTE_VALID) == 0 &&
                    (pt->pte_hi & LPTE_LOCKED) == 0) {
                        pvo_pt->pte_hi &= ~LPTE_HID;
                        moea64_pte_set(pt, pvo_pt);
                        return (i);
                }
        }

        /*
         * Now try secondary hash.
         */
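        /*
         * The secondary PTEG is the complement of the primary one within the
         * table, which is what the XOR with moea64_pteg_mask computes (e.g.,
         * with a hypothetical mask of 0x3ff, primary PTEG 0x005 maps to
         * secondary PTEG 0x3fa).  Entries placed here get LPTE_HID set so
         * that moea64_pvo_pte_index() above can redo the same transformation
         * when the PTE is looked up again.
         */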
        ptegidx ^= moea64_pteg_mask;

        for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                if ((pt->pte_hi & LPTE_VALID) == 0 &&
                    (pt->pte_hi & LPTE_LOCKED) == 0) {
                        pvo_pt->pte_hi |= LPTE_HID;
                        moea64_pte_set(pt, pvo_pt);
                        return (i);
                }
        }

        panic("moea64_pte_insert: overflow");
        return (-1);
}

static boolean_t
moea64_query_bit(vm_page_t m, u_int64_t ptebit)
{
        struct  pvo_entry *pvo;
        struct  lpte *pt;

        if (moea64_attr_fetch(m) & ptebit)
                return (TRUE);

        vm_page_lock_queues();

        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                MOEA_PVO_CHECK(pvo);    /* sanity check */

                /*
                 * See if we saved the bit off.  If so, cache it and return
                 * success.
                 */
                if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
                        moea64_attr_save(m, ptebit);
                        MOEA_PVO_CHECK(pvo);    /* sanity check */
                        vm_page_unlock_queues();
                        return (TRUE);
                }
        }

        /*
         * No luck, now go through the hard part of looking at the PTEs
         * themselves.  Sync so that any pending REF/CHG bits are flushed to
         * the PTEs.
         */
        SYNC();
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                MOEA_PVO_CHECK(pvo);    /* sanity check */

                /*
                 * See if this pvo has a valid PTE.  If so, fetch the
                 * REF/CHG bits from the valid PTE.  If the appropriate
                 * ptebit is set, cache it and return success.
                 */
                LOCK_TABLE();
                pt = moea64_pvo_to_pte(pvo, -1);
                if (pt != NULL) {
                        moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
                        if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
                                UNLOCK_TABLE();

                                moea64_attr_save(m, ptebit);
                                MOEA_PVO_CHECK(pvo);    /* sanity check */
                                vm_page_unlock_queues();
                                return (TRUE);
                        }
                }
                UNLOCK_TABLE();
        }

        vm_page_unlock_queues();
        return (FALSE);
}

static u_int
moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
{
        u_int   count;
        struct  pvo_entry *pvo;
        struct  lpte *pt;
        uint64_t rv;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);

        /*
         * Clear the cached value.
         */
        rv = moea64_attr_fetch(m);
        moea64_attr_clear(m, ptebit);

        /*
         * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
         * we can reset the right ones).  Note that since the pvo entries and
         * list heads are accessed via the kernel's direct 1:1 mapping and are
         * never placed in the page table, we don't have to worry about
         * further accesses setting the REF/CHG bits.
         */
        SYNC();

        /*
         * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
         * valid pte, clear the ptebit from the valid pte.
         */
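        /*
         * As an illustration of the intended use (the callers live elsewhere
         * in this file): clearing LPTE_CHG typically backs pmap_clear_modify(),
         * while clearing LPTE_REF and returning the count of PTEs that had it
         * set is the basis of page aging.  The optional 'origbit' out
         * parameter reports the accumulated PTE low-word bits (including the
         * cached attribute bits) observed before the clear.
         */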
        count = 0;
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                MOEA_PVO_CHECK(pvo);    /* sanity check */

                LOCK_TABLE();
                pt = moea64_pvo_to_pte(pvo, -1);
                if (pt != NULL) {
                        moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
                        if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
                                count++;
                                moea64_pte_clear(pt, pvo->pvo_pmap,
                                    PVO_VADDR(pvo), ptebit);
                        }
                }
                rv |= pvo->pvo_pte.lpte.pte_lo;
                pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
                MOEA_PVO_CHECK(pvo);    /* sanity check */
                UNLOCK_TABLE();
        }

        if (origbit != NULL) {
                *origbit = rv;
        }

        return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
        struct pvo_entry *pvo;
        vm_offset_t ppa;
        int error = 0;

        PMAP_LOCK(kernel_pmap);
        for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
                pvo = moea64_pvo_find_va(kernel_pmap, ppa, NULL);
                if (pvo == NULL ||
                    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
                        error = EFAULT;
                        break;
                }
        }
        PMAP_UNLOCK(kernel_pmap);

        return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
        vm_offset_t va, tmpva, ppa, offset;

        ppa = trunc_page(pa);
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        va = kmem_alloc_nofault(kernel_map, size);

        if (!va)
                panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

        for (tmpva = va; size > 0;) {
                moea64_kenter(mmu, tmpva, ppa);
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;
                ppa += PAGE_SIZE;
        }

        return ((void *)(va + offset));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
        vm_offset_t base, offset;

        base = trunc_page(va);
        offset = va & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
        struct pvo_entry *pvo;
        vm_offset_t lim;
        vm_paddr_t pa;
        vm_size_t len;

        PMAP_LOCK(pm);
        while (sz > 0) {
                lim = round_page(va + 1);
                len = MIN(lim - va, sz);
                pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
                if (pvo != NULL) {
                        pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
                            (va & ADDR_POFF);
                        moea64_syncicache(pm, va, pa, len);
                }
                va += len;
                sz -= len;
        }
        PMAP_UNLOCK(pm);
}