pmap.c revision 324400
1/*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: stable/11/sys/powerpc/booke/pmap.c 324400 2017-10-07 21:13:54Z alc $"); 53 54#include "opt_kstack_pages.h" 55 56#include <sys/param.h> 57#include <sys/conf.h> 58#include <sys/malloc.h> 59#include <sys/ktr.h> 60#include <sys/proc.h> 61#include <sys/user.h> 62#include <sys/queue.h> 63#include <sys/systm.h> 64#include <sys/kernel.h> 65#include <sys/kerneldump.h> 66#include <sys/linker.h> 67#include <sys/msgbuf.h> 68#include <sys/lock.h> 69#include <sys/mutex.h> 70#include <sys/rwlock.h> 71#include <sys/sched.h> 72#include <sys/smp.h> 73#include <sys/vmmeter.h> 74 75#include <vm/vm.h> 76#include <vm/vm_page.h> 77#include <vm/vm_kern.h> 78#include <vm/vm_pageout.h> 79#include <vm/vm_extern.h> 80#include <vm/vm_object.h> 81#include <vm/vm_param.h> 82#include <vm/vm_map.h> 83#include <vm/vm_pager.h> 84#include <vm/uma.h> 85 86#include <machine/cpu.h> 87#include <machine/pcb.h> 88#include <machine/platform.h> 89 90#include <machine/tlb.h> 91#include <machine/spr.h> 92#include <machine/md_var.h> 93#include <machine/mmuvar.h> 94#include <machine/pmap.h> 95#include <machine/pte.h> 96 97#include "mmu_if.h" 98 99#define SPARSE_MAPDEV 100#ifdef DEBUG 101#define debugf(fmt, args...) printf(fmt, ##args) 102#else 103#define debugf(fmt, args...) 104#endif 105 106#define TODO panic("%s: not implemented", __func__); 107 108extern unsigned char _etext[]; 109extern unsigned char _end[]; 110 111extern uint32_t *bootinfo; 112 113vm_paddr_t kernload; 114vm_offset_t kernstart; 115vm_size_t kernsize; 116 117/* Message buffer and tables. */ 118static vm_offset_t data_start; 119static vm_size_t data_end; 120 121/* Phys/avail memory regions. */ 122static struct mem_region *availmem_regions; 123static int availmem_regions_sz; 124static struct mem_region *physmem_regions; 125static int physmem_regions_sz; 126 127/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 128static vm_offset_t zero_page_va; 129static struct mtx zero_page_mutex; 130 131static struct mtx tlbivax_mutex; 132 133/* 134 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 135 * by idle thred only, no lock required. 136 */ 137static vm_offset_t zero_page_idle_va; 138 139/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 140static vm_offset_t copy_page_src_va; 141static vm_offset_t copy_page_dst_va; 142static struct mtx copy_page_mutex; 143 144/**************************************************************************/ 145/* PMAP */ 146/**************************************************************************/ 147 148static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 149 vm_prot_t, u_int flags, int8_t psind); 150 151unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 152unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 153 154/* 155 * If user pmap is processed with mmu_booke_remove and the resident count 156 * drops to 0, there are no more pages to remove, so we need not continue. 
157 */ 158#define PMAP_REMOVE_DONE(pmap) \ 159 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 160 161extern int elf32_nxstack; 162 163/**************************************************************************/ 164/* TLB and TID handling */ 165/**************************************************************************/ 166 167/* Translation ID busy table */ 168static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 169 170/* 171 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 172 * core revisions and should be read from h/w registers during early config. 173 */ 174uint32_t tlb0_entries; 175uint32_t tlb0_ways; 176uint32_t tlb0_entries_per_way; 177uint32_t tlb1_entries; 178 179#define TLB0_ENTRIES (tlb0_entries) 180#define TLB0_WAYS (tlb0_ways) 181#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 182 183#define TLB1_ENTRIES (tlb1_entries) 184#define TLB1_MAXENTRIES 64 185 186static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE; 187 188static tlbtid_t tid_alloc(struct pmap *); 189static void tid_flush(tlbtid_t tid); 190 191static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 192 193static void tlb1_read_entry(tlb_entry_t *, unsigned int); 194static void tlb1_write_entry(tlb_entry_t *, unsigned int); 195static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 196static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); 197 198static vm_size_t tsize2size(unsigned int); 199static unsigned int size2tsize(vm_size_t); 200static unsigned int ilog2(unsigned int); 201 202static void set_mas4_defaults(void); 203 204static inline void tlb0_flush_entry(vm_offset_t); 205static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 206 207/**************************************************************************/ 208/* Page table management */ 209/**************************************************************************/ 210 211static struct rwlock_padalign pvh_global_lock; 212 213/* Data for the pv entry allocation mechanism */ 214static uma_zone_t pvzone; 215static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 216 217#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 218 219#ifndef PMAP_SHPGPERPROC 220#define PMAP_SHPGPERPROC 200 221#endif 222 223static void ptbl_init(void); 224static struct ptbl_buf *ptbl_buf_alloc(void); 225static void ptbl_buf_free(struct ptbl_buf *); 226static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 227 228static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t); 229static void ptbl_free(mmu_t, pmap_t, unsigned int); 230static void ptbl_hold(mmu_t, pmap_t, unsigned int); 231static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 232 233static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 234static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 235static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); 236static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 237static void kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, 238 vm_offset_t pdir); 239 240static pv_entry_t pv_alloc(void); 241static void pv_free(pv_entry_t); 242static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 243static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 244 245static void booke_pmap_init_qpages(void); 246 247/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). 
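 *
 * (Editor's note, not part of the original file: a quick consistency check,
 *  assuming 4 KB pages.  PTBL_BUFS is defined just below as 128 * 16 = 2048
 *  buffers, and mmu_booke_bootstrap() reserves PTBL_BUFS * PTBL_PAGES *
 *  PAGE_SIZE bytes of KVA for them.  For that reservation to match the
 *  0xc100_4000 - 0xc200_3fff "ptbl bufs" window in the layout notes above
 *  (16 MB), each ptbl must span PTBL_PAGES = 2 pages, i.e. 8 KB:
 *
 *      2048 bufs * 2 pages * 4096 bytes = 16 MB
 *
 *  PTBL_PAGES itself is not defined in this file.)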
*/ 248#define PTBL_BUFS (128 * 16) 249 250struct ptbl_buf { 251 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 252 vm_offset_t kva; /* va of mapping */ 253}; 254 255/* ptbl free list and a lock used for access synchronization. */ 256static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 257static struct mtx ptbl_buf_freelist_lock; 258 259/* Base address of kva space allocated fot ptbl bufs. */ 260static vm_offset_t ptbl_buf_pool_vabase; 261 262/* Pointer to ptbl_buf structures. */ 263static struct ptbl_buf *ptbl_bufs; 264 265#ifdef SMP 266extern tlb_entry_t __boot_tlb1[]; 267void pmap_bootstrap_ap(volatile uint32_t *); 268#endif 269 270/* 271 * Kernel MMU interface 272 */ 273static void mmu_booke_clear_modify(mmu_t, vm_page_t); 274static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 275 vm_size_t, vm_offset_t); 276static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 277static void mmu_booke_copy_pages(mmu_t, vm_page_t *, 278 vm_offset_t, vm_page_t *, vm_offset_t, int); 279static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 280 vm_prot_t, u_int flags, int8_t psind); 281static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 282 vm_page_t, vm_prot_t); 283static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 284 vm_prot_t); 285static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 286static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 287 vm_prot_t); 288static void mmu_booke_init(mmu_t); 289static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 290static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 291static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 292static int mmu_booke_ts_referenced(mmu_t, vm_page_t); 293static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 294 int); 295static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 296 vm_paddr_t *); 297static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 298 vm_object_t, vm_pindex_t, vm_size_t); 299static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 300static void mmu_booke_page_init(mmu_t, vm_page_t); 301static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 302static void mmu_booke_pinit(mmu_t, pmap_t); 303static void mmu_booke_pinit0(mmu_t, pmap_t); 304static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 305 vm_prot_t); 306static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 307static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 308static void mmu_booke_release(mmu_t, pmap_t); 309static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 310static void mmu_booke_remove_all(mmu_t, vm_page_t); 311static void mmu_booke_remove_write(mmu_t, vm_page_t); 312static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 313static void mmu_booke_zero_page(mmu_t, vm_page_t); 314static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 315static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 316static void mmu_booke_activate(mmu_t, struct thread *); 317static void mmu_booke_deactivate(mmu_t, struct thread *); 318static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 319static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 320static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); 321static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 322static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 323static void mmu_booke_kenter(mmu_t, vm_offset_t, 
vm_paddr_t); 324static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t); 325static void mmu_booke_kremove(mmu_t, vm_offset_t); 326static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 327static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 328 vm_size_t); 329static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t, 330 void **); 331static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t, 332 void *); 333static void mmu_booke_scan_init(mmu_t); 334static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m); 335static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr); 336static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, 337 vm_size_t sz, vm_memattr_t mode); 338 339static mmu_method_t mmu_booke_methods[] = { 340 /* pmap dispatcher interface */ 341 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 342 MMUMETHOD(mmu_copy, mmu_booke_copy), 343 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 344 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages), 345 MMUMETHOD(mmu_enter, mmu_booke_enter), 346 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 347 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 348 MMUMETHOD(mmu_extract, mmu_booke_extract), 349 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 350 MMUMETHOD(mmu_init, mmu_booke_init), 351 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 352 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 353 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 354 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 355 MMUMETHOD(mmu_map, mmu_booke_map), 356 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 357 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 358 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 359 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 360 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 361 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 362 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 363 MMUMETHOD(mmu_protect, mmu_booke_protect), 364 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 365 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 366 MMUMETHOD(mmu_release, mmu_booke_release), 367 MMUMETHOD(mmu_remove, mmu_booke_remove), 368 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 369 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 370 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 371 MMUMETHOD(mmu_unwire, mmu_booke_unwire), 372 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 373 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 374 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 375 MMUMETHOD(mmu_activate, mmu_booke_activate), 376 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 377 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page), 378 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page), 379 380 /* Internal interfaces */ 381 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 382 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 383 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 384 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr), 385 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 386 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr), 387 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 388 MMUMETHOD(mmu_kremove, mmu_booke_kremove), 389 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 390 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr), 391 392 /* dumpsys() support */ 393 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 394 MMUMETHOD(mmu_dumpsys_unmap, 
mmu_booke_dumpsys_unmap), 395 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init), 396 397 { 0, 0 } 398}; 399 400MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 401 402static __inline uint32_t 403tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) 404{ 405 uint32_t attrib; 406 int i; 407 408 if (ma != VM_MEMATTR_DEFAULT) { 409 switch (ma) { 410 case VM_MEMATTR_UNCACHEABLE: 411 return (MAS2_I | MAS2_G); 412 case VM_MEMATTR_WRITE_COMBINING: 413 case VM_MEMATTR_WRITE_BACK: 414 case VM_MEMATTR_PREFETCHABLE: 415 return (MAS2_I); 416 case VM_MEMATTR_WRITE_THROUGH: 417 return (MAS2_W | MAS2_M); 418 case VM_MEMATTR_CACHEABLE: 419 return (MAS2_M); 420 } 421 } 422 423 /* 424 * Assume the page is cache inhibited and access is guarded unless 425 * it's in our available memory array. 426 */ 427 attrib = _TLB_ENTRY_IO; 428 for (i = 0; i < physmem_regions_sz; i++) { 429 if ((pa >= physmem_regions[i].mr_start) && 430 (pa < (physmem_regions[i].mr_start + 431 physmem_regions[i].mr_size))) { 432 attrib = _TLB_ENTRY_MEM; 433 break; 434 } 435 } 436 437 return (attrib); 438} 439 440static inline void 441tlb_miss_lock(void) 442{ 443#ifdef SMP 444 struct pcpu *pc; 445 446 if (!smp_started) 447 return; 448 449 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 450 if (pc != pcpup) { 451 452 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 453 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 454 455 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 456 ("tlb_miss_lock: tried to lock self")); 457 458 tlb_lock(pc->pc_booke_tlb_lock); 459 460 CTR1(KTR_PMAP, "%s: locked", __func__); 461 } 462 } 463#endif 464} 465 466static inline void 467tlb_miss_unlock(void) 468{ 469#ifdef SMP 470 struct pcpu *pc; 471 472 if (!smp_started) 473 return; 474 475 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 476 if (pc != pcpup) { 477 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 478 __func__, pc->pc_cpuid); 479 480 tlb_unlock(pc->pc_booke_tlb_lock); 481 482 CTR1(KTR_PMAP, "%s: unlocked", __func__); 483 } 484 } 485#endif 486} 487 488/* Return number of entries in TLB0. */ 489static __inline void 490tlb0_get_tlbconf(void) 491{ 492 uint32_t tlb0_cfg; 493 494 tlb0_cfg = mfspr(SPR_TLB0CFG); 495 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 496 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 497 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 498} 499 500/* Return number of entries in TLB1. */ 501static __inline void 502tlb1_get_tlbconf(void) 503{ 504 uint32_t tlb1_cfg; 505 506 tlb1_cfg = mfspr(SPR_TLB1CFG); 507 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK; 508} 509 510/**************************************************************************/ 511/* Page table related */ 512/**************************************************************************/ 513 514/* Initialize pool of kva ptbl buffers. */ 515static void 516ptbl_init(void) 517{ 518 int i; 519 520 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 521 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 522 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 523 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 524 525 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 526 TAILQ_INIT(&ptbl_buf_freelist); 527 528 for (i = 0; i < PTBL_BUFS; i++) { 529 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 530 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 531 } 532} 533 534/* Get a ptbl_buf from the freelist. 
*/ 535static struct ptbl_buf * 536ptbl_buf_alloc(void) 537{ 538 struct ptbl_buf *buf; 539 540 mtx_lock(&ptbl_buf_freelist_lock); 541 buf = TAILQ_FIRST(&ptbl_buf_freelist); 542 if (buf != NULL) 543 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 544 mtx_unlock(&ptbl_buf_freelist_lock); 545 546 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 547 548 return (buf); 549} 550 551/* Return ptbl buff to free pool. */ 552static void 553ptbl_buf_free(struct ptbl_buf *buf) 554{ 555 556 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 557 558 mtx_lock(&ptbl_buf_freelist_lock); 559 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 560 mtx_unlock(&ptbl_buf_freelist_lock); 561} 562 563/* 564 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 565 */ 566static void 567ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 568{ 569 struct ptbl_buf *pbuf; 570 571 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 572 573 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 574 575 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 576 if (pbuf->kva == (vm_offset_t)ptbl) { 577 /* Remove from pmap ptbl buf list. */ 578 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 579 580 /* Free corresponding ptbl buf. */ 581 ptbl_buf_free(pbuf); 582 break; 583 } 584} 585 586/* Allocate page table. */ 587static pte_t * 588ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep) 589{ 590 vm_page_t mtbl[PTBL_PAGES]; 591 vm_page_t m; 592 struct ptbl_buf *pbuf; 593 unsigned int pidx; 594 pte_t *ptbl; 595 int i, j; 596 597 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 598 (pmap == kernel_pmap), pdir_idx); 599 600 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 601 ("ptbl_alloc: invalid pdir_idx")); 602 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 603 ("pte_alloc: valid ptbl entry exists!")); 604 605 pbuf = ptbl_buf_alloc(); 606 if (pbuf == NULL) 607 panic("pte_alloc: couldn't alloc kernel virtual memory"); 608 609 ptbl = (pte_t *)pbuf->kva; 610 611 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 612 613 /* Allocate ptbl pages, this will sleep! */ 614 for (i = 0; i < PTBL_PAGES; i++) { 615 pidx = (PTBL_PAGES * pdir_idx) + i; 616 while ((m = vm_page_alloc(NULL, pidx, 617 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 618 PMAP_UNLOCK(pmap); 619 rw_wunlock(&pvh_global_lock); 620 if (nosleep) { 621 ptbl_free_pmap_ptbl(pmap, ptbl); 622 for (j = 0; j < i; j++) 623 vm_page_free(mtbl[j]); 624 atomic_subtract_int(&vm_cnt.v_wire_count, i); 625 return (NULL); 626 } 627 VM_WAIT; 628 rw_wlock(&pvh_global_lock); 629 PMAP_LOCK(pmap); 630 } 631 mtbl[i] = m; 632 } 633 634 /* Map allocated pages into kernel_pmap. */ 635 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 636 637 /* Zero whole ptbl. */ 638 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 639 640 /* Add pbuf to the pmap ptbl bufs list. */ 641 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 642 643 return (ptbl); 644} 645 646/* Free ptbl pages and invalidate pdir entry. 
*/ 647static void 648ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 649{ 650 pte_t *ptbl; 651 vm_paddr_t pa; 652 vm_offset_t va; 653 vm_page_t m; 654 int i; 655 656 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 657 (pmap == kernel_pmap), pdir_idx); 658 659 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 660 ("ptbl_free: invalid pdir_idx")); 661 662 ptbl = pmap->pm_pdir[pdir_idx]; 663 664 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 665 666 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 667 668 /* 669 * Invalidate the pdir entry as soon as possible, so that other CPUs 670 * don't attempt to look up the page tables we are releasing. 671 */ 672 mtx_lock_spin(&tlbivax_mutex); 673 tlb_miss_lock(); 674 675 pmap->pm_pdir[pdir_idx] = NULL; 676 677 tlb_miss_unlock(); 678 mtx_unlock_spin(&tlbivax_mutex); 679 680 for (i = 0; i < PTBL_PAGES; i++) { 681 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 682 pa = pte_vatopa(mmu, kernel_pmap, va); 683 m = PHYS_TO_VM_PAGE(pa); 684 vm_page_free_zero(m); 685 atomic_subtract_int(&vm_cnt.v_wire_count, 1); 686 mmu_booke_kremove(mmu, va); 687 } 688 689 ptbl_free_pmap_ptbl(pmap, ptbl); 690} 691 692/* 693 * Decrement ptbl pages hold count and attempt to free ptbl pages. 694 * Called when removing pte entry from ptbl. 695 * 696 * Return 1 if ptbl pages were freed. 697 */ 698static int 699ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 700{ 701 pte_t *ptbl; 702 vm_paddr_t pa; 703 vm_page_t m; 704 int i; 705 706 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 707 (pmap == kernel_pmap), pdir_idx); 708 709 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 710 ("ptbl_unhold: invalid pdir_idx")); 711 KASSERT((pmap != kernel_pmap), 712 ("ptbl_unhold: unholding kernel ptbl!")); 713 714 ptbl = pmap->pm_pdir[pdir_idx]; 715 716 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 717 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 718 ("ptbl_unhold: non kva ptbl")); 719 720 /* decrement hold count */ 721 for (i = 0; i < PTBL_PAGES; i++) { 722 pa = pte_vatopa(mmu, kernel_pmap, 723 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 724 m = PHYS_TO_VM_PAGE(pa); 725 m->wire_count--; 726 } 727 728 /* 729 * Free ptbl pages if there are no pte etries in this ptbl. 730 * wire_count has the same value for all ptbl pages, so check the last 731 * page. 732 */ 733 if (m->wire_count == 0) { 734 ptbl_free(mmu, pmap, pdir_idx); 735 736 //debugf("ptbl_unhold: e (freed ptbl)\n"); 737 return (1); 738 } 739 740 return (0); 741} 742 743/* 744 * Increment hold count for ptbl pages. This routine is used when a new pte 745 * entry is being inserted into the ptbl. 746 */ 747static void 748ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 749{ 750 vm_paddr_t pa; 751 pte_t *ptbl; 752 vm_page_t m; 753 int i; 754 755 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 756 pdir_idx); 757 758 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 759 ("ptbl_hold: invalid pdir_idx")); 760 KASSERT((pmap != kernel_pmap), 761 ("ptbl_hold: holding kernel ptbl!")); 762 763 ptbl = pmap->pm_pdir[pdir_idx]; 764 765 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 766 767 for (i = 0; i < PTBL_PAGES; i++) { 768 pa = pte_vatopa(mmu, kernel_pmap, 769 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 770 m = PHYS_TO_VM_PAGE(pa); 771 m->wire_count++; 772 } 773} 774 775/* Allocate pv_entry structure. 
*/ 776pv_entry_t 777pv_alloc(void) 778{ 779 pv_entry_t pv; 780 781 pv_entry_count++; 782 if (pv_entry_count > pv_entry_high_water) 783 pagedaemon_wakeup(); 784 pv = uma_zalloc(pvzone, M_NOWAIT); 785 786 return (pv); 787} 788 789/* Free pv_entry structure. */ 790static __inline void 791pv_free(pv_entry_t pve) 792{ 793 794 pv_entry_count--; 795 uma_zfree(pvzone, pve); 796} 797 798 799/* Allocate and initialize pv_entry structure. */ 800static void 801pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 802{ 803 pv_entry_t pve; 804 805 //int su = (pmap == kernel_pmap); 806 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 807 // (u_int32_t)pmap, va, (u_int32_t)m); 808 809 pve = pv_alloc(); 810 if (pve == NULL) 811 panic("pv_insert: no pv entries!"); 812 813 pve->pv_pmap = pmap; 814 pve->pv_va = va; 815 816 /* add to pv_list */ 817 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 818 rw_assert(&pvh_global_lock, RA_WLOCKED); 819 820 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 821 822 //debugf("pv_insert: e\n"); 823} 824 825/* Destroy pv entry. */ 826static void 827pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 828{ 829 pv_entry_t pve; 830 831 //int su = (pmap == kernel_pmap); 832 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 833 834 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 835 rw_assert(&pvh_global_lock, RA_WLOCKED); 836 837 /* find pv entry */ 838 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 839 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 840 /* remove from pv_list */ 841 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 842 if (TAILQ_EMPTY(&m->md.pv_list)) 843 vm_page_aflag_clear(m, PGA_WRITEABLE); 844 845 /* free pv entry struct */ 846 pv_free(pve); 847 break; 848 } 849 } 850 851 //debugf("pv_remove: e\n"); 852} 853 854/* 855 * Clean pte entry, try to free page table page if requested. 856 * 857 * Return 1 if ptbl pages were freed, otherwise return 0. 858 */ 859static int 860pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 861{ 862 unsigned int pdir_idx = PDIR_IDX(va); 863 unsigned int ptbl_idx = PTBL_IDX(va); 864 vm_page_t m; 865 pte_t *ptbl; 866 pte_t *pte; 867 868 //int su = (pmap == kernel_pmap); 869 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 870 // su, (u_int32_t)pmap, va, flags); 871 872 ptbl = pmap->pm_pdir[pdir_idx]; 873 KASSERT(ptbl, ("pte_remove: null ptbl")); 874 875 pte = &ptbl[ptbl_idx]; 876 877 if (pte == NULL || !PTE_ISVALID(pte)) 878 return (0); 879 880 if (PTE_ISWIRED(pte)) 881 pmap->pm_stats.wired_count--; 882 883 /* Handle managed entry. */ 884 if (PTE_ISMANAGED(pte)) { 885 /* Get vm_page_t for mapped pte. */ 886 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 887 888 if (PTE_ISMODIFIED(pte)) 889 vm_page_dirty(m); 890 891 if (PTE_ISREFERENCED(pte)) 892 vm_page_aflag_set(m, PGA_REFERENCED); 893 894 pv_remove(pmap, va, m); 895 } 896 897 mtx_lock_spin(&tlbivax_mutex); 898 tlb_miss_lock(); 899 900 tlb0_flush_entry(va); 901 *pte = 0; 902 903 tlb_miss_unlock(); 904 mtx_unlock_spin(&tlbivax_mutex); 905 906 pmap->pm_stats.resident_count--; 907 908 if (flags & PTBL_UNHOLD) { 909 //debugf("pte_remove: e (unhold)\n"); 910 return (ptbl_unhold(mmu, pmap, pdir_idx)); 911 } 912 913 //debugf("pte_remove: e\n"); 914 return (0); 915} 916 917/* 918 * Insert PTE for a given page and virtual address. 
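 *
 * (Editor's note, not part of the original file: pte_enter() and pte_find()
 *  below both walk the two-level page table rooted at pmap->pm_pdir.  A
 *  minimal sketch of the lookup, assuming PDIR_IDX()/PTBL_IDX() split the
 *  virtual address into a directory index and a page-table index:
 *
 *      pte_t **pdir = pmap->pm_pdir;           // one ptbl pointer per pdir slot
 *      pte_t  *ptbl = pdir[PDIR_IDX(va)];      // ptbl covering 'va', or NULL
 *      pte_t  *pte  = NULL;
 *
 *      if (ptbl != NULL)
 *              pte = &ptbl[PTBL_IDX(va)];      // PTE for this 4 KB page
 *
 *  pte_enter() allocates the ptbl on demand via ptbl_alloc() when the pdir
 *  slot is still NULL; otherwise it either replaces an existing valid
 *  mapping or takes an extra hold on the ptbl pages via ptbl_hold().)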
919 */ 920static int 921pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, 922 boolean_t nosleep) 923{ 924 unsigned int pdir_idx = PDIR_IDX(va); 925 unsigned int ptbl_idx = PTBL_IDX(va); 926 pte_t *ptbl, *pte; 927 928 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 929 pmap == kernel_pmap, pmap, va); 930 931 /* Get the page table pointer. */ 932 ptbl = pmap->pm_pdir[pdir_idx]; 933 934 if (ptbl == NULL) { 935 /* Allocate page table pages. */ 936 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep); 937 if (ptbl == NULL) { 938 KASSERT(nosleep, ("nosleep and NULL ptbl")); 939 return (ENOMEM); 940 } 941 } else { 942 /* 943 * Check if there is valid mapping for requested 944 * va, if there is, remove it. 945 */ 946 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 947 if (PTE_ISVALID(pte)) { 948 pte_remove(mmu, pmap, va, PTBL_HOLD); 949 } else { 950 /* 951 * pte is not used, increment hold count 952 * for ptbl pages. 953 */ 954 if (pmap != kernel_pmap) 955 ptbl_hold(mmu, pmap, pdir_idx); 956 } 957 } 958 959 /* 960 * Insert pv_entry into pv_list for mapped page if part of managed 961 * memory. 962 */ 963 if ((m->oflags & VPO_UNMANAGED) == 0) { 964 flags |= PTE_MANAGED; 965 966 /* Create and insert pv entry. */ 967 pv_insert(pmap, va, m); 968 } 969 970 pmap->pm_stats.resident_count++; 971 972 mtx_lock_spin(&tlbivax_mutex); 973 tlb_miss_lock(); 974 975 tlb0_flush_entry(va); 976 if (pmap->pm_pdir[pdir_idx] == NULL) { 977 /* 978 * If we just allocated a new page table, hook it in 979 * the pdir. 980 */ 981 pmap->pm_pdir[pdir_idx] = ptbl; 982 } 983 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 984 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m)); 985 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */ 986 987 tlb_miss_unlock(); 988 mtx_unlock_spin(&tlbivax_mutex); 989 return (0); 990} 991 992/* Return the pa for the given pmap/va. */ 993static vm_paddr_t 994pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 995{ 996 vm_paddr_t pa = 0; 997 pte_t *pte; 998 999 pte = pte_find(mmu, pmap, va); 1000 if ((pte != NULL) && PTE_ISVALID(pte)) 1001 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 1002 return (pa); 1003} 1004 1005/* Get a pointer to a PTE in a page table. */ 1006static pte_t * 1007pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1008{ 1009 unsigned int pdir_idx = PDIR_IDX(va); 1010 unsigned int ptbl_idx = PTBL_IDX(va); 1011 1012 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 1013 1014 if (pmap->pm_pdir[pdir_idx]) 1015 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 1016 1017 return (NULL); 1018} 1019 1020/* Set up kernel page tables. */ 1021static void 1022kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir) 1023{ 1024 int i; 1025 vm_offset_t va; 1026 pte_t *pte; 1027 1028 /* Initialize kernel pdir */ 1029 for (i = 0; i < kernel_ptbls; i++) 1030 kernel_pmap->pm_pdir[kptbl_min + i] = 1031 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1032 1033 /* 1034 * Fill in PTEs covering kernel code and data. They are not required 1035 * for address translation, as this area is covered by static TLB1 1036 * entries, but for pte_vatopa() to work correctly with kernel area 1037 * addresses. 
1038 */ 1039 for (va = addr; va < data_end; va += PAGE_SIZE) { 1040 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1041 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart)); 1042 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1043 PTE_VALID | PTE_PS_4KB; 1044 } 1045} 1046 1047/**************************************************************************/ 1048/* PMAP related */ 1049/**************************************************************************/ 1050 1051/* 1052 * This is called during booke_init, before the system is really initialized. 1053 */ 1054static void 1055mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 1056{ 1057 vm_paddr_t phys_kernelend; 1058 struct mem_region *mp, *mp1; 1059 int cnt, i, j; 1060 vm_paddr_t s, e, sz; 1061 vm_paddr_t physsz, hwphyssz; 1062 u_int phys_avail_count; 1063 vm_size_t kstack0_sz; 1064 vm_offset_t kernel_pdir, kstack0; 1065 vm_paddr_t kstack0_phys; 1066 void *dpcpu; 1067 1068 debugf("mmu_booke_bootstrap: entered\n"); 1069 1070 /* Set interesting system properties */ 1071 hw_direct_map = 0; 1072 elf32_nxstack = 1; 1073 1074 /* Initialize invalidation mutex */ 1075 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 1076 1077 /* Read TLB0 size and associativity. */ 1078 tlb0_get_tlbconf(); 1079 1080 /* 1081 * Align kernel start and end address (kernel image). 1082 * Note that kernel end does not necessarily relate to kernsize. 1083 * kernsize is the size of the kernel that is actually mapped. 1084 */ 1085 kernstart = trunc_page(start); 1086 data_start = round_page(kernelend); 1087 data_end = data_start; 1088 1089 /* 1090 * Addresses of preloaded modules (like file systems) use 1091 * physical addresses. Make sure we relocate those into 1092 * virtual addresses. 1093 */ 1094 preload_addr_relocate = kernstart - kernload; 1095 1096 /* Allocate the dynamic per-cpu area. */ 1097 dpcpu = (void *)data_end; 1098 data_end += DPCPU_SIZE; 1099 1100 /* Allocate space for the message buffer. */ 1101 msgbufp = (struct msgbuf *)data_end; 1102 data_end += msgbufsize; 1103 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 1104 data_end); 1105 1106 data_end = round_page(data_end); 1107 1108 /* Allocate space for ptbl_bufs. */ 1109 ptbl_bufs = (struct ptbl_buf *)data_end; 1110 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1111 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1112 data_end); 1113 1114 data_end = round_page(data_end); 1115 1116 /* Allocate PTE tables for kernel KVA. */ 1117 kernel_pdir = data_end; 1118 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS, 1119 PDIR_SIZE); 1120 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1121 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1122 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1123 1124 debugf(" data_end: 0x%08x\n", data_end); 1125 if (data_end - kernstart > kernsize) { 1126 kernsize += tlb1_mapin_region(kernstart + kernsize, 1127 kernload + kernsize, (data_end - kernstart) - kernsize); 1128 } 1129 data_end = kernstart + kernsize; 1130 debugf(" updated data_end: 0x%08x\n", data_end); 1131 1132 /* 1133 * Clear the structures - note we can only do it safely after the 1134 * possible additional TLB1 translations are in place (above) so that 1135 * all range up to the currently calculated 'data_end' is covered. 
1136 */ 1137 dpcpu_init(dpcpu, 0); 1138 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1139 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1140 1141 /*******************************************************/ 1142 /* Set the start and end of kva. */ 1143 /*******************************************************/ 1144 virtual_avail = round_page(data_end); 1145 virtual_end = VM_MAX_KERNEL_ADDRESS; 1146 1147 /* Allocate KVA space for page zero/copy operations. */ 1148 zero_page_va = virtual_avail; 1149 virtual_avail += PAGE_SIZE; 1150 zero_page_idle_va = virtual_avail; 1151 virtual_avail += PAGE_SIZE; 1152 copy_page_src_va = virtual_avail; 1153 virtual_avail += PAGE_SIZE; 1154 copy_page_dst_va = virtual_avail; 1155 virtual_avail += PAGE_SIZE; 1156 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1157 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1158 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1159 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1160 1161 /* Initialize page zero/copy mutexes. */ 1162 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1163 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1164 1165 /* Allocate KVA space for ptbl bufs. */ 1166 ptbl_buf_pool_vabase = virtual_avail; 1167 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1168 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1169 ptbl_buf_pool_vabase, virtual_avail); 1170 1171 /* Calculate corresponding physical addresses for the kernel region. */ 1172 phys_kernelend = kernload + kernsize; 1173 debugf("kernel image and allocated data:\n"); 1174 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload); 1175 debugf(" kernstart = 0x%08x\n", kernstart); 1176 debugf(" kernsize = 0x%08x\n", kernsize); 1177 1178 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1179 panic("mmu_booke_bootstrap: phys_avail too small"); 1180 1181 /* 1182 * Remove kernel physical address range from avail regions list. Page 1183 * align all regions. Non-page aligned memory isn't very interesting 1184 * to us. Also, sort the entries for ascending addresses. 1185 */ 1186 1187 /* Retrieve phys/avail mem regions */ 1188 mem_regions(&physmem_regions, &physmem_regions_sz, 1189 &availmem_regions, &availmem_regions_sz); 1190 sz = 0; 1191 cnt = availmem_regions_sz; 1192 debugf("processing avail regions:\n"); 1193 for (mp = availmem_regions; mp->mr_size; mp++) { 1194 s = mp->mr_start; 1195 e = mp->mr_start + mp->mr_size; 1196 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e); 1197 /* Check whether this region holds all of the kernel. */ 1198 if (s < kernload && e > phys_kernelend) { 1199 availmem_regions[cnt].mr_start = phys_kernelend; 1200 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1201 e = kernload; 1202 } 1203 /* Look whether this regions starts within the kernel. */ 1204 if (s >= kernload && s < phys_kernelend) { 1205 if (e <= phys_kernelend) 1206 goto empty; 1207 s = phys_kernelend; 1208 } 1209 /* Now look whether this region ends within the kernel. */ 1210 if (e > kernload && e <= phys_kernelend) { 1211 if (s >= kernload) 1212 goto empty; 1213 e = kernload; 1214 } 1215 /* Now page align the start and size of the region. */ 1216 s = round_page(s); 1217 e = trunc_page(e); 1218 if (e < s) 1219 e = s; 1220 sz = e - s; 1221 debugf("%09jx-%09jx = %jx\n", 1222 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz); 1223 1224 /* Check whether some memory is left here. 
*/ 1225 if (sz == 0) { 1226 empty: 1227 memmove(mp, mp + 1, 1228 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1229 cnt--; 1230 mp--; 1231 continue; 1232 } 1233 1234 /* Do an insertion sort. */ 1235 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1236 if (s < mp1->mr_start) 1237 break; 1238 if (mp1 < mp) { 1239 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1240 mp1->mr_start = s; 1241 mp1->mr_size = sz; 1242 } else { 1243 mp->mr_start = s; 1244 mp->mr_size = sz; 1245 } 1246 } 1247 availmem_regions_sz = cnt; 1248 1249 /*******************************************************/ 1250 /* Steal physical memory for kernel stack from the end */ 1251 /* of the first avail region */ 1252 /*******************************************************/ 1253 kstack0_sz = kstack_pages * PAGE_SIZE; 1254 kstack0_phys = availmem_regions[0].mr_start + 1255 availmem_regions[0].mr_size; 1256 kstack0_phys -= kstack0_sz; 1257 availmem_regions[0].mr_size -= kstack0_sz; 1258 1259 /*******************************************************/ 1260 /* Fill in phys_avail table, based on availmem_regions */ 1261 /*******************************************************/ 1262 phys_avail_count = 0; 1263 physsz = 0; 1264 hwphyssz = 0; 1265 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1266 1267 debugf("fill in phys_avail:\n"); 1268 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1269 1270 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n", 1271 (uintmax_t)availmem_regions[i].mr_start, 1272 (uintmax_t)availmem_regions[i].mr_start + 1273 availmem_regions[i].mr_size, 1274 (uintmax_t)availmem_regions[i].mr_size); 1275 1276 if (hwphyssz != 0 && 1277 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1278 debugf(" hw.physmem adjust\n"); 1279 if (physsz < hwphyssz) { 1280 phys_avail[j] = availmem_regions[i].mr_start; 1281 phys_avail[j + 1] = 1282 availmem_regions[i].mr_start + 1283 hwphyssz - physsz; 1284 physsz = hwphyssz; 1285 phys_avail_count++; 1286 } 1287 break; 1288 } 1289 1290 phys_avail[j] = availmem_regions[i].mr_start; 1291 phys_avail[j + 1] = availmem_regions[i].mr_start + 1292 availmem_regions[i].mr_size; 1293 phys_avail_count++; 1294 physsz += availmem_regions[i].mr_size; 1295 } 1296 physmem = btoc(physsz); 1297 1298 /* Calculate the last available physical address. */ 1299 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1300 ; 1301 Maxmem = powerpc_btop(phys_avail[i + 1]); 1302 1303 debugf("Maxmem = 0x%08lx\n", Maxmem); 1304 debugf("phys_avail_count = %d\n", phys_avail_count); 1305 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n", 1306 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem); 1307 1308 /*******************************************************/ 1309 /* Initialize (statically allocated) kernel pmap. 
*/ 1310 /*******************************************************/ 1311 PMAP_LOCK_INIT(kernel_pmap); 1312 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1313 1314 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1315 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1316 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1317 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1318 1319 kernel_pte_alloc(data_end, kernstart, kernel_pdir); 1320 for (i = 0; i < MAXCPU; i++) { 1321 kernel_pmap->pm_tid[i] = TID_KERNEL; 1322 1323 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1324 tidbusy[i][TID_KERNEL] = kernel_pmap; 1325 } 1326 1327 /* Mark kernel_pmap active on all CPUs */ 1328 CPU_FILL(&kernel_pmap->pm_active); 1329 1330 /* 1331 * Initialize the global pv list lock. 1332 */ 1333 rw_init(&pvh_global_lock, "pmap pv global"); 1334 1335 /*******************************************************/ 1336 /* Final setup */ 1337 /*******************************************************/ 1338 1339 /* Enter kstack0 into kernel map, provide guard page */ 1340 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1341 thread0.td_kstack = kstack0; 1342 thread0.td_kstack_pages = kstack_pages; 1343 1344 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1345 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n", 1346 kstack0_phys, kstack0_phys + kstack0_sz); 1347 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1348 1349 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1350 for (i = 0; i < kstack_pages; i++) { 1351 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1352 kstack0 += PAGE_SIZE; 1353 kstack0_phys += PAGE_SIZE; 1354 } 1355 1356 pmap_bootstrapped = 1; 1357 1358 debugf("virtual_avail = %08x\n", virtual_avail); 1359 debugf("virtual_end = %08x\n", virtual_end); 1360 1361 debugf("mmu_booke_bootstrap: exit\n"); 1362} 1363 1364#ifdef SMP 1365 void 1366tlb1_ap_prep(void) 1367{ 1368 tlb_entry_t *e, tmp; 1369 unsigned int i; 1370 1371 /* Prepare TLB1 image for AP processors */ 1372 e = __boot_tlb1; 1373 for (i = 0; i < TLB1_ENTRIES; i++) { 1374 tlb1_read_entry(&tmp, i); 1375 1376 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED)) 1377 memcpy(e++, &tmp, sizeof(tmp)); 1378 } 1379} 1380 1381void 1382pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1383{ 1384 int i; 1385 1386 /* 1387 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1388 * have the snapshot of its contents in the s/w __boot_tlb1[] table 1389 * created by tlb1_ap_prep(), so use these values directly to 1390 * (re)program AP's TLB1 hardware. 1391 * 1392 * Start at index 1 because index 0 has the kernel map. 1393 */ 1394 for (i = 1; i < TLB1_ENTRIES; i++) { 1395 if (__boot_tlb1[i].mas1 & MAS1_VALID) 1396 tlb1_write_entry(&__boot_tlb1[i], i); 1397 } 1398 1399 set_mas4_defaults(); 1400} 1401#endif 1402 1403static void 1404booke_pmap_init_qpages(void) 1405{ 1406 struct pcpu *pc; 1407 int i; 1408 1409 CPU_FOREACH(i) { 1410 pc = pcpu_find(i); 1411 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); 1412 if (pc->pc_qmap_addr == 0) 1413 panic("pmap_init_qpages: unable to allocate KVA"); 1414 } 1415} 1416 1417SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL); 1418 1419/* 1420 * Get the physical page address for the given pmap/virtual address. 
1421 */ 1422static vm_paddr_t 1423mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1424{ 1425 vm_paddr_t pa; 1426 1427 PMAP_LOCK(pmap); 1428 pa = pte_vatopa(mmu, pmap, va); 1429 PMAP_UNLOCK(pmap); 1430 1431 return (pa); 1432} 1433 1434/* 1435 * Extract the physical page address associated with the given 1436 * kernel virtual address. 1437 */ 1438static vm_paddr_t 1439mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1440{ 1441 tlb_entry_t e; 1442 int i; 1443 1444 /* Check TLB1 mappings */ 1445 for (i = 0; i < TLB1_ENTRIES; i++) { 1446 tlb1_read_entry(&e, i); 1447 if (!(e.mas1 & MAS1_VALID)) 1448 continue; 1449 if (va >= e.virt && va < e.virt + e.size) 1450 return (e.phys + (va - e.virt)); 1451 } 1452 1453 return (pte_vatopa(mmu, kernel_pmap, va)); 1454} 1455 1456/* 1457 * Initialize the pmap module. 1458 * Called by vm_init, to initialize any structures that the pmap 1459 * system needs to map virtual memory. 1460 */ 1461static void 1462mmu_booke_init(mmu_t mmu) 1463{ 1464 int shpgperproc = PMAP_SHPGPERPROC; 1465 1466 /* 1467 * Initialize the address space (zone) for the pv entries. Set a 1468 * high water mark so that the system can recover from excessive 1469 * numbers of pv entries. 1470 */ 1471 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1472 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1473 1474 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1475 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1476 1477 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1478 pv_entry_high_water = 9 * (pv_entry_max / 10); 1479 1480 uma_zone_reserve_kva(pvzone, pv_entry_max); 1481 1482 /* Pre-fill pvzone with initial number of pv entries. */ 1483 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1484 1485 /* Initialize ptbl allocation. */ 1486 ptbl_init(); 1487} 1488 1489/* 1490 * Map a list of wired pages into kernel virtual address space. This is 1491 * intended for temporary mappings which do not need page modification or 1492 * references recorded. Existing mappings in the region are overwritten. 1493 */ 1494static void 1495mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1496{ 1497 vm_offset_t va; 1498 1499 va = sva; 1500 while (count-- > 0) { 1501 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1502 va += PAGE_SIZE; 1503 m++; 1504 } 1505} 1506 1507/* 1508 * Remove page mappings from kernel virtual address space. Intended for 1509 * temporary mappings entered by mmu_booke_qenter. 1510 */ 1511static void 1512mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1513{ 1514 vm_offset_t va; 1515 1516 va = sva; 1517 while (count-- > 0) { 1518 mmu_booke_kremove(mmu, va); 1519 va += PAGE_SIZE; 1520 } 1521} 1522 1523/* 1524 * Map a wired page into kernel virtual address space. 
1525 */ 1526static void 1527mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1528{ 1529 1530 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1531} 1532 1533static void 1534mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1535{ 1536 uint32_t flags; 1537 pte_t *pte; 1538 1539 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1540 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1541 1542 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1543 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT; 1544 flags |= PTE_PS_4KB; 1545 1546 pte = pte_find(mmu, kernel_pmap, va); 1547 1548 mtx_lock_spin(&tlbivax_mutex); 1549 tlb_miss_lock(); 1550 1551 if (PTE_ISVALID(pte)) { 1552 1553 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1554 1555 /* Flush entry from TLB0 */ 1556 tlb0_flush_entry(va); 1557 } 1558 1559 *pte = PTE_RPN_FROM_PA(pa) | flags; 1560 1561 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1562 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1563 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1564 1565 /* Flush the real memory from the instruction cache. */ 1566 if ((flags & (PTE_I | PTE_G)) == 0) 1567 __syncicache((void *)va, PAGE_SIZE); 1568 1569 tlb_miss_unlock(); 1570 mtx_unlock_spin(&tlbivax_mutex); 1571} 1572 1573/* 1574 * Remove a page from kernel page table. 1575 */ 1576static void 1577mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1578{ 1579 pte_t *pte; 1580 1581 CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va); 1582 1583 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1584 (va <= VM_MAX_KERNEL_ADDRESS)), 1585 ("mmu_booke_kremove: invalid va")); 1586 1587 pte = pte_find(mmu, kernel_pmap, va); 1588 1589 if (!PTE_ISVALID(pte)) { 1590 1591 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1592 1593 return; 1594 } 1595 1596 mtx_lock_spin(&tlbivax_mutex); 1597 tlb_miss_lock(); 1598 1599 /* Invalidate entry in TLB0, update PTE. */ 1600 tlb0_flush_entry(va); 1601 *pte = 0; 1602 1603 tlb_miss_unlock(); 1604 mtx_unlock_spin(&tlbivax_mutex); 1605} 1606 1607/* 1608 * Initialize pmap associated with process 0. 1609 */ 1610static void 1611mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1612{ 1613 1614 PMAP_LOCK_INIT(pmap); 1615 mmu_booke_pinit(mmu, pmap); 1616 PCPU_SET(curpmap, pmap); 1617} 1618 1619/* 1620 * Initialize a preallocated and zeroed pmap structure, 1621 * such as one in a vmspace structure. 1622 */ 1623static void 1624mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1625{ 1626 int i; 1627 1628 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1629 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1630 1631 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1632 1633 for (i = 0; i < MAXCPU; i++) 1634 pmap->pm_tid[i] = TID_NONE; 1635 CPU_ZERO(&kernel_pmap->pm_active); 1636 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1637 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1638 TAILQ_INIT(&pmap->pm_ptbl_list); 1639} 1640 1641/* 1642 * Release any resources held by the given physical map. 1643 * Called when a pmap initialized by mmu_booke_pinit is being released. 1644 * Should only be called if the map contains no valid mappings. 
1645 */ 1646static void 1647mmu_booke_release(mmu_t mmu, pmap_t pmap) 1648{ 1649 1650 KASSERT(pmap->pm_stats.resident_count == 0, 1651 ("pmap_release: pmap resident count %ld != 0", 1652 pmap->pm_stats.resident_count)); 1653} 1654 1655/* 1656 * Insert the given physical page at the specified virtual address in the 1657 * target physical map with the protection requested. If specified the page 1658 * will be wired down. 1659 */ 1660static int 1661mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1662 vm_prot_t prot, u_int flags, int8_t psind) 1663{ 1664 int error; 1665 1666 rw_wlock(&pvh_global_lock); 1667 PMAP_LOCK(pmap); 1668 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind); 1669 rw_wunlock(&pvh_global_lock); 1670 PMAP_UNLOCK(pmap); 1671 return (error); 1672} 1673 1674static int 1675mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1676 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused) 1677{ 1678 pte_t *pte; 1679 vm_paddr_t pa; 1680 uint32_t flags; 1681 int error, su, sync; 1682 1683 pa = VM_PAGE_TO_PHYS(m); 1684 su = (pmap == kernel_pmap); 1685 sync = 0; 1686 1687 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1688 // "pa=0x%08x prot=0x%08x flags=%#x)\n", 1689 // (u_int32_t)pmap, su, pmap->pm_tid, 1690 // (u_int32_t)m, va, pa, prot, flags); 1691 1692 if (su) { 1693 KASSERT(((va >= virtual_avail) && 1694 (va <= VM_MAX_KERNEL_ADDRESS)), 1695 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1696 } else { 1697 KASSERT((va <= VM_MAXUSER_ADDRESS), 1698 ("mmu_booke_enter_locked: user pmap, non user va")); 1699 } 1700 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1701 VM_OBJECT_ASSERT_LOCKED(m->object); 1702 1703 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1704 1705 /* 1706 * If there is an existing mapping, and the physical address has not 1707 * changed, must be protection or wiring change. 1708 */ 1709 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1710 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1711 1712 /* 1713 * Before actually updating pte->flags we calculate and 1714 * prepare its new value in a helper var. 1715 */ 1716 flags = *pte; 1717 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1718 1719 /* Wiring change, just update stats. */ 1720 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) { 1721 if (!PTE_ISWIRED(pte)) { 1722 flags |= PTE_WIRED; 1723 pmap->pm_stats.wired_count++; 1724 } 1725 } else { 1726 if (PTE_ISWIRED(pte)) { 1727 flags &= ~PTE_WIRED; 1728 pmap->pm_stats.wired_count--; 1729 } 1730 } 1731 1732 if (prot & VM_PROT_WRITE) { 1733 /* Add write permissions. */ 1734 flags |= PTE_SW; 1735 if (!su) 1736 flags |= PTE_UW; 1737 1738 if ((flags & PTE_MANAGED) != 0) 1739 vm_page_aflag_set(m, PGA_WRITEABLE); 1740 } else { 1741 /* Handle modified pages, sense modify status. */ 1742 1743 /* 1744 * The PTE_MODIFIED flag could be set by underlying 1745 * TLB misses since we last read it (above), possibly 1746 * other CPUs could update it so we check in the PTE 1747 * directly rather than rely on that saved local flags 1748 * copy. 1749 */ 1750 if (PTE_ISMODIFIED(pte)) 1751 vm_page_dirty(m); 1752 } 1753 1754 if (prot & VM_PROT_EXECUTE) { 1755 flags |= PTE_SX; 1756 if (!su) 1757 flags |= PTE_UX; 1758 1759 /* 1760 * Check existing flags for execute permissions: if we 1761 * are turning execute permissions on, icache should 1762 * be flushed. 
1763 */ 1764 if ((*pte & (PTE_UX | PTE_SX)) == 0) 1765 sync++; 1766 } 1767 1768 flags &= ~PTE_REFERENCED; 1769 1770 /* 1771 * The new flags value is all calculated -- only now actually 1772 * update the PTE. 1773 */ 1774 mtx_lock_spin(&tlbivax_mutex); 1775 tlb_miss_lock(); 1776 1777 tlb0_flush_entry(va); 1778 *pte &= ~PTE_FLAGS_MASK; 1779 *pte |= flags; 1780 1781 tlb_miss_unlock(); 1782 mtx_unlock_spin(&tlbivax_mutex); 1783 1784 } else { 1785 /* 1786 * If there is an existing mapping, but it's for a different 1787 * physical address, pte_enter() will delete the old mapping. 1788 */ 1789 //if ((pte != NULL) && PTE_ISVALID(pte)) 1790 // debugf("mmu_booke_enter_locked: replace\n"); 1791 //else 1792 // debugf("mmu_booke_enter_locked: new\n"); 1793 1794 /* Now set up the flags and install the new mapping. */ 1795 flags = (PTE_SR | PTE_VALID); 1796 flags |= PTE_M; 1797 1798 if (!su) 1799 flags |= PTE_UR; 1800 1801 if (prot & VM_PROT_WRITE) { 1802 flags |= PTE_SW; 1803 if (!su) 1804 flags |= PTE_UW; 1805 1806 if ((m->oflags & VPO_UNMANAGED) == 0) 1807 vm_page_aflag_set(m, PGA_WRITEABLE); 1808 } 1809 1810 if (prot & VM_PROT_EXECUTE) { 1811 flags |= PTE_SX; 1812 if (!su) 1813 flags |= PTE_UX; 1814 } 1815 1816 /* If its wired update stats. */ 1817 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) 1818 flags |= PTE_WIRED; 1819 1820 error = pte_enter(mmu, pmap, m, va, flags, 1821 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0); 1822 if (error != 0) 1823 return (KERN_RESOURCE_SHORTAGE); 1824 1825 if ((flags & PMAP_ENTER_WIRED) != 0) 1826 pmap->pm_stats.wired_count++; 1827 1828 /* Flush the real memory from the instruction cache. */ 1829 if (prot & VM_PROT_EXECUTE) 1830 sync++; 1831 } 1832 1833 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1834 __syncicache((void *)va, PAGE_SIZE); 1835 sync = 0; 1836 } 1837 1838 return (KERN_SUCCESS); 1839} 1840 1841/* 1842 * Maps a sequence of resident pages belonging to the same object. 1843 * The sequence begins with the given page m_start. This page is 1844 * mapped at the given virtual address start. Each subsequent page is 1845 * mapped at a virtual address that is offset from start by the same 1846 * amount as the page is offset from m_start within the object. The 1847 * last page in the sequence is the page with the largest offset from 1848 * m_start that can be mapped at a virtual address less than the given 1849 * virtual address end. Not every virtual page between start and end 1850 * is mapped; only those for which a resident page exists with the 1851 * corresponding offset from m_start are mapped. 
1852 */ 1853static void 1854mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1855 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1856{ 1857 vm_page_t m; 1858 vm_pindex_t diff, psize; 1859 1860 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1861 1862 psize = atop(end - start); 1863 m = m_start; 1864 rw_wlock(&pvh_global_lock); 1865 PMAP_LOCK(pmap); 1866 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1867 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1868 prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1869 PMAP_ENTER_NOSLEEP, 0); 1870 m = TAILQ_NEXT(m, listq); 1871 } 1872 rw_wunlock(&pvh_global_lock); 1873 PMAP_UNLOCK(pmap); 1874} 1875 1876static void 1877mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1878 vm_prot_t prot) 1879{ 1880 1881 rw_wlock(&pvh_global_lock); 1882 PMAP_LOCK(pmap); 1883 mmu_booke_enter_locked(mmu, pmap, va, m, 1884 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 1885 0); 1886 rw_wunlock(&pvh_global_lock); 1887 PMAP_UNLOCK(pmap); 1888} 1889 1890/* 1891 * Remove the given range of addresses from the specified map. 1892 * 1893 * It is assumed that the start and end are properly rounded to the page size. 1894 */ 1895static void 1896mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1897{ 1898 pte_t *pte; 1899 uint8_t hold_flag; 1900 1901 int su = (pmap == kernel_pmap); 1902 1903 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1904 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1905 1906 if (su) { 1907 KASSERT(((va >= virtual_avail) && 1908 (va <= VM_MAX_KERNEL_ADDRESS)), 1909 ("mmu_booke_remove: kernel pmap, non kernel va")); 1910 } else { 1911 KASSERT((va <= VM_MAXUSER_ADDRESS), 1912 ("mmu_booke_remove: user pmap, non user va")); 1913 } 1914 1915 if (PMAP_REMOVE_DONE(pmap)) { 1916 //debugf("mmu_booke_remove: e (empty)\n"); 1917 return; 1918 } 1919 1920 hold_flag = PTBL_HOLD_FLAG(pmap); 1921 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1922 1923 rw_wlock(&pvh_global_lock); 1924 PMAP_LOCK(pmap); 1925 for (; va < endva; va += PAGE_SIZE) { 1926 pte = pte_find(mmu, pmap, va); 1927 if ((pte != NULL) && PTE_ISVALID(pte)) 1928 pte_remove(mmu, pmap, va, hold_flag); 1929 } 1930 PMAP_UNLOCK(pmap); 1931 rw_wunlock(&pvh_global_lock); 1932 1933 //debugf("mmu_booke_remove: e\n"); 1934} 1935 1936/* 1937 * Remove physical page from all pmaps in which it resides. 1938 */ 1939static void 1940mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1941{ 1942 pv_entry_t pv, pvn; 1943 uint8_t hold_flag; 1944 1945 rw_wlock(&pvh_global_lock); 1946 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1947 pvn = TAILQ_NEXT(pv, pv_link); 1948 1949 PMAP_LOCK(pv->pv_pmap); 1950 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1951 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1952 PMAP_UNLOCK(pv->pv_pmap); 1953 } 1954 vm_page_aflag_clear(m, PGA_WRITEABLE); 1955 rw_wunlock(&pvh_global_lock); 1956} 1957 1958/* 1959 * Map a range of physical addresses into kernel virtual address space. 
1960 */ 1961static vm_offset_t 1962mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1963 vm_paddr_t pa_end, int prot) 1964{ 1965 vm_offset_t sva = *virt; 1966 vm_offset_t va = sva; 1967 1968 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1969 // sva, pa_start, pa_end); 1970 1971 while (pa_start < pa_end) { 1972 mmu_booke_kenter(mmu, va, pa_start); 1973 va += PAGE_SIZE; 1974 pa_start += PAGE_SIZE; 1975 } 1976 *virt = va; 1977 1978 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1979 return (sva); 1980} 1981 1982/* 1983 * The pmap must be activated before it's address space can be accessed in any 1984 * way. 1985 */ 1986static void 1987mmu_booke_activate(mmu_t mmu, struct thread *td) 1988{ 1989 pmap_t pmap; 1990 u_int cpuid; 1991 1992 pmap = &td->td_proc->p_vmspace->vm_pmap; 1993 1994 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1995 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1996 1997 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1998 1999 sched_pin(); 2000 2001 cpuid = PCPU_GET(cpuid); 2002 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 2003 PCPU_SET(curpmap, pmap); 2004 2005 if (pmap->pm_tid[cpuid] == TID_NONE) 2006 tid_alloc(pmap); 2007 2008 /* Load PID0 register with pmap tid value. */ 2009 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 2010 __asm __volatile("isync"); 2011 2012 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0); 2013 2014 sched_unpin(); 2015 2016 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 2017 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 2018} 2019 2020/* 2021 * Deactivate the specified process's address space. 2022 */ 2023static void 2024mmu_booke_deactivate(mmu_t mmu, struct thread *td) 2025{ 2026 pmap_t pmap; 2027 2028 pmap = &td->td_proc->p_vmspace->vm_pmap; 2029 2030 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 2031 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 2032 2033 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0); 2034 2035 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 2036 PCPU_SET(curpmap, NULL); 2037} 2038 2039/* 2040 * Copy the range specified by src_addr/len 2041 * from the source map to the range dst_addr/len 2042 * in the destination map. 2043 * 2044 * This routine is only advisory and need not do anything. 2045 */ 2046static void 2047mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 2048 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 2049{ 2050 2051} 2052 2053/* 2054 * Set the physical protection on the specified range of this map as requested. 2055 */ 2056static void 2057mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 2058 vm_prot_t prot) 2059{ 2060 vm_offset_t va; 2061 vm_page_t m; 2062 pte_t *pte; 2063 2064 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2065 mmu_booke_remove(mmu, pmap, sva, eva); 2066 return; 2067 } 2068 2069 if (prot & VM_PROT_WRITE) 2070 return; 2071 2072 PMAP_LOCK(pmap); 2073 for (va = sva; va < eva; va += PAGE_SIZE) { 2074 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2075 if (PTE_ISVALID(pte)) { 2076 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2077 2078 mtx_lock_spin(&tlbivax_mutex); 2079 tlb_miss_lock(); 2080 2081 /* Handle modified pages. 
*/ 2082 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 2083 vm_page_dirty(m); 2084 2085 tlb0_flush_entry(va); 2086 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2087 2088 tlb_miss_unlock(); 2089 mtx_unlock_spin(&tlbivax_mutex); 2090 } 2091 } 2092 } 2093 PMAP_UNLOCK(pmap); 2094} 2095 2096/* 2097 * Clear the write and modified bits in each of the given page's mappings. 2098 */ 2099static void 2100mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 2101{ 2102 pv_entry_t pv; 2103 pte_t *pte; 2104 2105 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2106 ("mmu_booke_remove_write: page %p is not managed", m)); 2107 2108 /* 2109 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2110 * set by another thread while the object is locked. Thus, 2111 * if PGA_WRITEABLE is clear, no page table entries need updating. 2112 */ 2113 VM_OBJECT_ASSERT_WLOCKED(m->object); 2114 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2115 return; 2116 rw_wlock(&pvh_global_lock); 2117 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2118 PMAP_LOCK(pv->pv_pmap); 2119 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2120 if (PTE_ISVALID(pte)) { 2121 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2122 2123 mtx_lock_spin(&tlbivax_mutex); 2124 tlb_miss_lock(); 2125 2126 /* Handle modified pages. */ 2127 if (PTE_ISMODIFIED(pte)) 2128 vm_page_dirty(m); 2129 2130 /* Flush mapping from TLB0. */ 2131 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2132 2133 tlb_miss_unlock(); 2134 mtx_unlock_spin(&tlbivax_mutex); 2135 } 2136 } 2137 PMAP_UNLOCK(pv->pv_pmap); 2138 } 2139 vm_page_aflag_clear(m, PGA_WRITEABLE); 2140 rw_wunlock(&pvh_global_lock); 2141} 2142 2143static void 2144mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2145{ 2146 pte_t *pte; 2147 pmap_t pmap; 2148 vm_page_t m; 2149 vm_offset_t addr; 2150 vm_paddr_t pa = 0; 2151 int active, valid; 2152 2153 va = trunc_page(va); 2154 sz = round_page(sz); 2155 2156 rw_wlock(&pvh_global_lock); 2157 pmap = PCPU_GET(curpmap); 2158 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2159 while (sz > 0) { 2160 PMAP_LOCK(pm); 2161 pte = pte_find(mmu, pm, va); 2162 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2163 if (valid) 2164 pa = PTE_PA(pte); 2165 PMAP_UNLOCK(pm); 2166 if (valid) { 2167 if (!active) { 2168 /* Create a mapping in the active pmap. */ 2169 addr = 0; 2170 m = PHYS_TO_VM_PAGE(pa); 2171 PMAP_LOCK(pmap); 2172 pte_enter(mmu, pmap, m, addr, 2173 PTE_SR | PTE_VALID | PTE_UR, FALSE); 2174 __syncicache((void *)addr, PAGE_SIZE); 2175 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2176 PMAP_UNLOCK(pmap); 2177 } else 2178 __syncicache((void *)va, PAGE_SIZE); 2179 } 2180 va += PAGE_SIZE; 2181 sz -= PAGE_SIZE; 2182 } 2183 rw_wunlock(&pvh_global_lock); 2184} 2185 2186/* 2187 * Atomically extract and hold the physical page with the given 2188 * pmap and virtual address pair if that mapping permits the given 2189 * protection. 
2190 */ 2191static vm_page_t 2192mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2193 vm_prot_t prot) 2194{ 2195 pte_t *pte; 2196 vm_page_t m; 2197 uint32_t pte_wbit; 2198 vm_paddr_t pa; 2199 2200 m = NULL; 2201 pa = 0; 2202 PMAP_LOCK(pmap); 2203retry: 2204 pte = pte_find(mmu, pmap, va); 2205 if ((pte != NULL) && PTE_ISVALID(pte)) { 2206 if (pmap == kernel_pmap) 2207 pte_wbit = PTE_SW; 2208 else 2209 pte_wbit = PTE_UW; 2210 2211 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2212 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2213 goto retry; 2214 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2215 vm_page_hold(m); 2216 } 2217 } 2218 2219 PA_UNLOCK_COND(pa); 2220 PMAP_UNLOCK(pmap); 2221 return (m); 2222} 2223 2224/* 2225 * Initialize a vm_page's machine-dependent fields. 2226 */ 2227static void 2228mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2229{ 2230 2231 TAILQ_INIT(&m->md.pv_list); 2232} 2233 2234/* 2235 * mmu_booke_zero_page_area zeros the specified hardware page by 2236 * mapping it into virtual memory and using bzero to clear 2237 * its contents. 2238 * 2239 * off and size must reside within a single page. 2240 */ 2241static void 2242mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2243{ 2244 vm_offset_t va; 2245 2246 /* XXX KASSERT off and size are within a single page? */ 2247 2248 mtx_lock(&zero_page_mutex); 2249 va = zero_page_va; 2250 2251 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2252 bzero((caddr_t)va + off, size); 2253 mmu_booke_kremove(mmu, va); 2254 2255 mtx_unlock(&zero_page_mutex); 2256} 2257 2258/* 2259 * mmu_booke_zero_page zeros the specified hardware page. 2260 */ 2261static void 2262mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2263{ 2264 vm_offset_t off, va; 2265 2266 mtx_lock(&zero_page_mutex); 2267 va = zero_page_va; 2268 2269 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2270 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 2271 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 2272 mmu_booke_kremove(mmu, va); 2273 2274 mtx_unlock(&zero_page_mutex); 2275} 2276 2277/* 2278 * mmu_booke_copy_page copies the specified (machine independent) page by 2279 * mapping the page into virtual memory and using memcopy to copy the page, 2280 * one machine dependent page at a time. 
2281 */
2282static void
2283mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2284{
2285 vm_offset_t sva, dva;
2286
2287 sva = copy_page_src_va;
2288 dva = copy_page_dst_va;
2289
2290 mtx_lock(&copy_page_mutex);
2291 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2292 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2293 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2294 mmu_booke_kremove(mmu, dva);
2295 mmu_booke_kremove(mmu, sva);
2296 mtx_unlock(&copy_page_mutex);
2297}
2298
2299static inline void
2300mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2301 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2302{
2303 void *a_cp, *b_cp;
2304 vm_offset_t a_pg_offset, b_pg_offset;
2305 int cnt;
2306
2307 mtx_lock(&copy_page_mutex);
2308 while (xfersize > 0) {
2309 a_pg_offset = a_offset & PAGE_MASK;
2310 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2311 mmu_booke_kenter(mmu, copy_page_src_va,
2312 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2313 a_cp = (char *)copy_page_src_va + a_pg_offset;
2314 b_pg_offset = b_offset & PAGE_MASK;
2315 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2316 mmu_booke_kenter(mmu, copy_page_dst_va,
2317 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2318 b_cp = (char *)copy_page_dst_va + b_pg_offset;
2319 bcopy(a_cp, b_cp, cnt);
2320 mmu_booke_kremove(mmu, copy_page_dst_va);
2321 mmu_booke_kremove(mmu, copy_page_src_va);
2322 a_offset += cnt;
2323 b_offset += cnt;
2324 xfersize -= cnt;
2325 }
2326 mtx_unlock(&copy_page_mutex);
2327}
2328
2329/*
2330 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2331 * into virtual memory and using bzero to clear its contents. This is intended
2332 * to be called from the vm_pagezero process only and outside of Giant. No
2333 * lock is required.
2334 */
2335static void
2336mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2337{
2338 vm_offset_t va;
2339
2340 va = zero_page_idle_va;
2341 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2342 bzero((caddr_t)va, PAGE_SIZE);
2343 mmu_booke_kremove(mmu, va);
2344}
2345
2346static vm_offset_t
2347mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
2348{
2349 vm_paddr_t paddr;
2350 vm_offset_t qaddr;
2351 uint32_t flags;
2352 pte_t *pte;
2353
2354 paddr = VM_PAGE_TO_PHYS(m);
2355
2356 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2357 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
2358 flags |= PTE_PS_4KB;
2359
2360 critical_enter();
2361 qaddr = PCPU_GET(qmap_addr);
2362
2363 pte = pte_find(mmu, kernel_pmap, qaddr);
2364
2365 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
2366
2367 /*
2368 * XXX: tlbivax is broadcast to other cores, but qaddr should
2369 * not be present in other TLBs. Is there a better instruction
2370 * sequence to use? Or just forget it & use mmu_booke_kenter()...
2371 */
2372 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
2373 __asm __volatile("isync; msync");
2374
2375 *pte = PTE_RPN_FROM_PA(paddr) | flags;
2376
2377 /* Flush the real memory from the instruction cache.
*/ 2378 if ((flags & (PTE_I | PTE_G)) == 0) 2379 __syncicache((void *)qaddr, PAGE_SIZE); 2380 2381 return (qaddr); 2382} 2383 2384static void 2385mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr) 2386{ 2387 pte_t *pte; 2388 2389 pte = pte_find(mmu, kernel_pmap, addr); 2390 2391 KASSERT(PCPU_GET(qmap_addr) == addr, 2392 ("mmu_booke_quick_remove_page: invalid address")); 2393 KASSERT(*pte != 0, 2394 ("mmu_booke_quick_remove_page: PTE not in use")); 2395 2396 *pte = 0; 2397 critical_exit(); 2398} 2399 2400/* 2401 * Return whether or not the specified physical page was modified 2402 * in any of physical maps. 2403 */ 2404static boolean_t 2405mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2406{ 2407 pte_t *pte; 2408 pv_entry_t pv; 2409 boolean_t rv; 2410 2411 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2412 ("mmu_booke_is_modified: page %p is not managed", m)); 2413 rv = FALSE; 2414 2415 /* 2416 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2417 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2418 * is clear, no PTEs can be modified. 2419 */ 2420 VM_OBJECT_ASSERT_WLOCKED(m->object); 2421 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2422 return (rv); 2423 rw_wlock(&pvh_global_lock); 2424 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2425 PMAP_LOCK(pv->pv_pmap); 2426 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2427 PTE_ISVALID(pte)) { 2428 if (PTE_ISMODIFIED(pte)) 2429 rv = TRUE; 2430 } 2431 PMAP_UNLOCK(pv->pv_pmap); 2432 if (rv) 2433 break; 2434 } 2435 rw_wunlock(&pvh_global_lock); 2436 return (rv); 2437} 2438 2439/* 2440 * Return whether or not the specified virtual address is eligible 2441 * for prefault. 2442 */ 2443static boolean_t 2444mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2445{ 2446 2447 return (FALSE); 2448} 2449 2450/* 2451 * Return whether or not the specified physical page was referenced 2452 * in any physical maps. 2453 */ 2454static boolean_t 2455mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2456{ 2457 pte_t *pte; 2458 pv_entry_t pv; 2459 boolean_t rv; 2460 2461 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2462 ("mmu_booke_is_referenced: page %p is not managed", m)); 2463 rv = FALSE; 2464 rw_wlock(&pvh_global_lock); 2465 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2466 PMAP_LOCK(pv->pv_pmap); 2467 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2468 PTE_ISVALID(pte)) { 2469 if (PTE_ISREFERENCED(pte)) 2470 rv = TRUE; 2471 } 2472 PMAP_UNLOCK(pv->pv_pmap); 2473 if (rv) 2474 break; 2475 } 2476 rw_wunlock(&pvh_global_lock); 2477 return (rv); 2478} 2479 2480/* 2481 * Clear the modify bits on the specified physical page. 2482 */ 2483static void 2484mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2485{ 2486 pte_t *pte; 2487 pv_entry_t pv; 2488 2489 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2490 ("mmu_booke_clear_modify: page %p is not managed", m)); 2491 VM_OBJECT_ASSERT_WLOCKED(m->object); 2492 KASSERT(!vm_page_xbusied(m), 2493 ("mmu_booke_clear_modify: page %p is exclusive busied", m)); 2494 2495 /* 2496 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2497 * If the object containing the page is locked and the page is not 2498 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. 
2499 */ 2500 if ((m->aflags & PGA_WRITEABLE) == 0) 2501 return; 2502 rw_wlock(&pvh_global_lock); 2503 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2504 PMAP_LOCK(pv->pv_pmap); 2505 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2506 PTE_ISVALID(pte)) { 2507 mtx_lock_spin(&tlbivax_mutex); 2508 tlb_miss_lock(); 2509 2510 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2511 tlb0_flush_entry(pv->pv_va); 2512 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2513 PTE_REFERENCED); 2514 } 2515 2516 tlb_miss_unlock(); 2517 mtx_unlock_spin(&tlbivax_mutex); 2518 } 2519 PMAP_UNLOCK(pv->pv_pmap); 2520 } 2521 rw_wunlock(&pvh_global_lock); 2522} 2523 2524/* 2525 * Return a count of reference bits for a page, clearing those bits. 2526 * It is not necessary for every reference bit to be cleared, but it 2527 * is necessary that 0 only be returned when there are truly no 2528 * reference bits set. 2529 * 2530 * As an optimization, update the page's dirty field if a modified bit is 2531 * found while counting reference bits. This opportunistic update can be 2532 * performed at low cost and can eliminate the need for some future calls 2533 * to pmap_is_modified(). However, since this function stops after 2534 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 2535 * dirty pages. Those dirty pages will only be detected by a future call 2536 * to pmap_is_modified(). 2537 */ 2538static int 2539mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2540{ 2541 pte_t *pte; 2542 pv_entry_t pv; 2543 int count; 2544 2545 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2546 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2547 count = 0; 2548 rw_wlock(&pvh_global_lock); 2549 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2550 PMAP_LOCK(pv->pv_pmap); 2551 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2552 PTE_ISVALID(pte)) { 2553 if (PTE_ISMODIFIED(pte)) 2554 vm_page_dirty(m); 2555 if (PTE_ISREFERENCED(pte)) { 2556 mtx_lock_spin(&tlbivax_mutex); 2557 tlb_miss_lock(); 2558 2559 tlb0_flush_entry(pv->pv_va); 2560 *pte &= ~PTE_REFERENCED; 2561 2562 tlb_miss_unlock(); 2563 mtx_unlock_spin(&tlbivax_mutex); 2564 2565 if (++count >= PMAP_TS_REFERENCED_MAX) { 2566 PMAP_UNLOCK(pv->pv_pmap); 2567 break; 2568 } 2569 } 2570 } 2571 PMAP_UNLOCK(pv->pv_pmap); 2572 } 2573 rw_wunlock(&pvh_global_lock); 2574 return (count); 2575} 2576 2577/* 2578 * Clear the wired attribute from the mappings for the specified range of 2579 * addresses in the given pmap. Every valid mapping within that range must 2580 * have the wired attribute set. In contrast, invalid mappings cannot have 2581 * the wired attribute set, so they are ignored. 2582 * 2583 * The wired attribute of the page table entry is not a hardware feature, so 2584 * there is no need to invalidate any TLB entries. 2585 */ 2586static void 2587mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2588{ 2589 vm_offset_t va; 2590 pte_t *pte; 2591 2592 PMAP_LOCK(pmap); 2593 for (va = sva; va < eva; va += PAGE_SIZE) { 2594 if ((pte = pte_find(mmu, pmap, va)) != NULL && 2595 PTE_ISVALID(pte)) { 2596 if (!PTE_ISWIRED(pte)) 2597 panic("mmu_booke_unwire: pte %p isn't wired", 2598 pte); 2599 *pte &= ~PTE_WIRED; 2600 pmap->pm_stats.wired_count--; 2601 } 2602 } 2603 PMAP_UNLOCK(pmap); 2604 2605} 2606 2607/* 2608 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2609 * page. 
This count may be changed upwards or downwards in the future; it is 2610 * only necessary that true be returned for a small subset of pmaps for proper 2611 * page aging. 2612 */ 2613static boolean_t 2614mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2615{ 2616 pv_entry_t pv; 2617 int loops; 2618 boolean_t rv; 2619 2620 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2621 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2622 loops = 0; 2623 rv = FALSE; 2624 rw_wlock(&pvh_global_lock); 2625 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2626 if (pv->pv_pmap == pmap) { 2627 rv = TRUE; 2628 break; 2629 } 2630 if (++loops >= 16) 2631 break; 2632 } 2633 rw_wunlock(&pvh_global_lock); 2634 return (rv); 2635} 2636 2637/* 2638 * Return the number of managed mappings to the given physical page that are 2639 * wired. 2640 */ 2641static int 2642mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2643{ 2644 pv_entry_t pv; 2645 pte_t *pte; 2646 int count = 0; 2647 2648 if ((m->oflags & VPO_UNMANAGED) != 0) 2649 return (count); 2650 rw_wlock(&pvh_global_lock); 2651 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2652 PMAP_LOCK(pv->pv_pmap); 2653 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2654 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2655 count++; 2656 PMAP_UNLOCK(pv->pv_pmap); 2657 } 2658 rw_wunlock(&pvh_global_lock); 2659 return (count); 2660} 2661 2662static int 2663mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2664{ 2665 int i; 2666 vm_offset_t va; 2667 2668 /* 2669 * This currently does not work for entries that 2670 * overlap TLB1 entries. 2671 */ 2672 for (i = 0; i < TLB1_ENTRIES; i ++) { 2673 if (tlb1_iomapped(i, pa, size, &va) == 0) 2674 return (0); 2675 } 2676 2677 return (EFAULT); 2678} 2679 2680void 2681mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) 2682{ 2683 vm_paddr_t ppa; 2684 vm_offset_t ofs; 2685 vm_size_t gran; 2686 2687 /* Minidumps are based on virtual memory addresses. */ 2688 if (do_minidump) { 2689 *va = (void *)(vm_offset_t)pa; 2690 return; 2691 } 2692 2693 /* Raw physical memory dumps don't have a virtual address. */ 2694 /* We always map a 256MB page at 256M. */ 2695 gran = 256 * 1024 * 1024; 2696 ppa = rounddown2(pa, gran); 2697 ofs = pa - ppa; 2698 *va = (void *)gran; 2699 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO); 2700 2701 if (sz > (gran - ofs)) 2702 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran, 2703 _TLB_ENTRY_IO); 2704} 2705 2706void 2707mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va) 2708{ 2709 vm_paddr_t ppa; 2710 vm_offset_t ofs; 2711 vm_size_t gran; 2712 tlb_entry_t e; 2713 int i; 2714 2715 /* Minidumps are based on virtual memory addresses. */ 2716 /* Nothing to do... */ 2717 if (do_minidump) 2718 return; 2719 2720 for (i = 0; i < TLB1_ENTRIES; i++) { 2721 tlb1_read_entry(&e, i); 2722 if (!(e.mas1 & MAS1_VALID)) 2723 break; 2724 } 2725 2726 /* Raw physical memory dumps don't have a virtual address. */ 2727 i--; 2728 e.mas1 = 0; 2729 e.mas2 = 0; 2730 e.mas3 = 0; 2731 tlb1_write_entry(&e, i); 2732 2733 gran = 256 * 1024 * 1024; 2734 ppa = rounddown2(pa, gran); 2735 ofs = pa - ppa; 2736 if (sz > (gran - ofs)) { 2737 i--; 2738 e.mas1 = 0; 2739 e.mas2 = 0; 2740 e.mas3 = 0; 2741 tlb1_write_entry(&e, i); 2742 } 2743} 2744 2745extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; 2746 2747void 2748mmu_booke_scan_init(mmu_t mmu) 2749{ 2750 vm_offset_t va; 2751 pte_t *pte; 2752 int i; 2753 2754 if (!do_minidump) { 2755 /* Initialize phys. 
segments for dumpsys(). */ 2756 memset(&dump_map, 0, sizeof(dump_map)); 2757 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions, 2758 &availmem_regions_sz); 2759 for (i = 0; i < physmem_regions_sz; i++) { 2760 dump_map[i].pa_start = physmem_regions[i].mr_start; 2761 dump_map[i].pa_size = physmem_regions[i].mr_size; 2762 } 2763 return; 2764 } 2765 2766 /* Virtual segments for minidumps: */ 2767 memset(&dump_map, 0, sizeof(dump_map)); 2768 2769 /* 1st: kernel .data and .bss. */ 2770 dump_map[0].pa_start = trunc_page((uintptr_t)_etext); 2771 dump_map[0].pa_size = 2772 round_page((uintptr_t)_end) - dump_map[0].pa_start; 2773 2774 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2775 dump_map[1].pa_start = data_start; 2776 dump_map[1].pa_size = data_end - data_start; 2777 2778 /* 3rd: kernel VM. */ 2779 va = dump_map[1].pa_start + dump_map[1].pa_size; 2780 /* Find start of next chunk (from va). */ 2781 while (va < virtual_end) { 2782 /* Don't dump the buffer cache. */ 2783 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { 2784 va = kmi.buffer_eva; 2785 continue; 2786 } 2787 pte = pte_find(mmu, kernel_pmap, va); 2788 if (pte != NULL && PTE_ISVALID(pte)) 2789 break; 2790 va += PAGE_SIZE; 2791 } 2792 if (va < virtual_end) { 2793 dump_map[2].pa_start = va; 2794 va += PAGE_SIZE; 2795 /* Find last page in chunk. */ 2796 while (va < virtual_end) { 2797 /* Don't run into the buffer cache. */ 2798 if (va == kmi.buffer_sva) 2799 break; 2800 pte = pte_find(mmu, kernel_pmap, va); 2801 if (pte == NULL || !PTE_ISVALID(pte)) 2802 break; 2803 va += PAGE_SIZE; 2804 } 2805 dump_map[2].pa_size = va - dump_map[2].pa_start; 2806 } 2807} 2808 2809/* 2810 * Map a set of physical memory pages into the kernel virtual address space. 2811 * Return a pointer to where it is mapped. This routine is intended to be used 2812 * for mapping device memory, NOT real memory. 2813 */ 2814static void * 2815mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2816{ 2817 2818 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2819} 2820 2821static void * 2822mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) 2823{ 2824 tlb_entry_t e; 2825 void *res; 2826 uintptr_t va, tmpva; 2827 vm_size_t sz; 2828 int i; 2829 2830 /* 2831 * Check if this is premapped in TLB1. Note: this should probably also 2832 * check whether a sequence of TLB1 entries exist that match the 2833 * requirement, but now only checks the easy case. 2834 */ 2835 if (ma == VM_MEMATTR_DEFAULT) { 2836 for (i = 0; i < TLB1_ENTRIES; i++) { 2837 tlb1_read_entry(&e, i); 2838 if (!(e.mas1 & MAS1_VALID)) 2839 continue; 2840 if (pa >= e.phys && 2841 (pa + size) <= (e.phys + e.size)) 2842 return (void *)(e.virt + 2843 (vm_offset_t)(pa - e.phys)); 2844 } 2845 } 2846 2847 size = roundup(size, PAGE_SIZE); 2848 2849 /* 2850 * The device mapping area is between VM_MAXUSER_ADDRESS and 2851 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing. 2852 */ 2853#ifdef SPARSE_MAPDEV 2854 /* 2855 * With a sparse mapdev, align to the largest starting region. This 2856 * could feasibly be optimized for a 'best-fit' alignment, but that 2857 * calculation could be very costly. 
2858 */ 2859 do { 2860 tmpva = tlb1_map_base; 2861 va = roundup(tlb1_map_base, 1 << flsl(size)); 2862 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size)); 2863#else 2864 va = atomic_fetchadd_int(&tlb1_map_base, size); 2865#endif 2866 res = (void *)va; 2867 2868 do { 2869 sz = 1 << (ilog2(size) & ~1); 2870 if (va % sz != 0) { 2871 do { 2872 sz >>= 2; 2873 } while (va % sz != 0); 2874 } 2875 if (bootverbose) 2876 printf("Wiring VA=%x to PA=%jx (size=%x)\n", 2877 va, (uintmax_t)pa, sz); 2878 tlb1_set_entry(va, pa, sz, 2879 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)); 2880 size -= sz; 2881 pa += sz; 2882 va += sz; 2883 } while (size > 0); 2884 2885 return (res); 2886} 2887 2888/* 2889 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2890 */ 2891static void 2892mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2893{ 2894#ifdef SUPPORTS_SHRINKING_TLB1 2895 vm_offset_t base, offset; 2896 2897 /* 2898 * Unmap only if this is inside kernel virtual space. 2899 */ 2900 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2901 base = trunc_page(va); 2902 offset = va & PAGE_MASK; 2903 size = roundup(offset + size, PAGE_SIZE); 2904 kva_free(base, size); 2905 } 2906#endif 2907} 2908 2909/* 2910 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2911 * specified pmap. This eliminates the blast of soft faults on process startup 2912 * and immediately after an mmap. 2913 */ 2914static void 2915mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2916 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2917{ 2918 2919 VM_OBJECT_ASSERT_WLOCKED(object); 2920 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2921 ("mmu_booke_object_init_pt: non-device object")); 2922} 2923 2924/* 2925 * Perform the pmap work for mincore. 2926 */ 2927static int 2928mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2929 vm_paddr_t *locked_pa) 2930{ 2931 2932 /* XXX: this should be implemented at some point */ 2933 return (0); 2934} 2935 2936static int 2937mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz, 2938 vm_memattr_t mode) 2939{ 2940 vm_offset_t va; 2941 pte_t *pte; 2942 int i, j; 2943 tlb_entry_t e; 2944 2945 /* Check TLB1 mappings */ 2946 for (i = 0; i < TLB1_ENTRIES; i++) { 2947 tlb1_read_entry(&e, i); 2948 if (!(e.mas1 & MAS1_VALID)) 2949 continue; 2950 if (addr >= e.virt && addr < e.virt + e.size) 2951 break; 2952 } 2953 if (i < TLB1_ENTRIES) { 2954 /* Only allow full mappings to be modified for now. */ 2955 /* Validate the range. */ 2956 for (j = i, va = addr; va < addr + sz; va += e.size, j++) { 2957 tlb1_read_entry(&e, j); 2958 if (va != e.virt || (sz - (va - addr) < e.size)) 2959 return (EINVAL); 2960 } 2961 for (va = addr; va < addr + sz; va += e.size, i++) { 2962 tlb1_read_entry(&e, i); 2963 e.mas2 &= ~MAS2_WIMGE_MASK; 2964 e.mas2 |= tlb_calc_wimg(e.phys, mode); 2965 2966 /* 2967 * Write it out to the TLB. Should really re-sync with other 2968 * cores. 2969 */ 2970 tlb1_write_entry(&e, i); 2971 } 2972 return (0); 2973 } 2974 2975 /* Not in TLB1, try through pmap */ 2976 /* First validate the range. 
*/
2977 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2978 pte = pte_find(mmu, kernel_pmap, va);
2979 if (pte == NULL || !PTE_ISVALID(pte))
2980 return (EINVAL);
2981 }
2982
2983 mtx_lock_spin(&tlbivax_mutex);
2984 tlb_miss_lock();
2985 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2986 pte = pte_find(mmu, kernel_pmap, va);
2987 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
2988 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
2989 tlb0_flush_entry(va);
2990 }
2991 tlb_miss_unlock();
2992 mtx_unlock_spin(&tlbivax_mutex);
2993
2994 return (0);
2995}
2996
2997/**************************************************************************/
2998/* TID handling */
2999/**************************************************************************/
3000
3001/*
3002 * Allocate a TID. If necessary, steal one from someone else.
3003 * The new TID is flushed from the TLB before returning.
3004 */
3005static tlbtid_t
3006tid_alloc(pmap_t pmap)
3007{
3008 tlbtid_t tid;
3009 int thiscpu;
3010
3011 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3012
3013 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3014
3015 thiscpu = PCPU_GET(cpuid);
3016
3017 tid = PCPU_GET(tid_next);
3018 if (tid > TID_MAX)
3019 tid = TID_MIN;
3020 PCPU_SET(tid_next, tid + 1);
3021
3022 /* If we are stealing a TID, clear the relevant pmap's field. */
3023 if (tidbusy[thiscpu][tid] != NULL) {
3024
3025 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3026
3027 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3028
3029 /* Flush all entries from TLB0 matching this TID. */
3030 tid_flush(tid);
3031 }
3032
3033 tidbusy[thiscpu][tid] = pmap;
3034 pmap->pm_tid[thiscpu] = tid;
3035 __asm __volatile("msync; isync");
3036
3037 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3038 PCPU_GET(tid_next));
3039
3040 return (tid);
3041}
3042
3043/**************************************************************************/
3044/* TLB0 handling */
3045/**************************************************************************/
3046
3047static void
3048tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3049 uint32_t mas7)
3050{
3051 int as;
3052 char desc[3];
3053 tlbtid_t tid;
3054 vm_size_t size;
3055 unsigned int tsize;
3056
3057 desc[2] = '\0';
3058 if (mas1 & MAS1_VALID)
3059 desc[0] = 'V';
3060 else
3061 desc[0] = ' ';
3062
3063 if (mas1 & MAS1_IPROT)
3064 desc[1] = 'P';
3065 else
3066 desc[1] = ' ';
3067
3068 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3069 tid = MAS1_GETTID(mas1);
3070
3071 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3072 size = 0;
3073 if (tsize)
3074 size = tsize2size(tsize);
3075
3076 debugf("%3d: (%s) [AS=%d] "
3077 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3078 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3079 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3080}
3081
3082/* Convert TLB0 va and way number to tlb0[] table index. */
3083static inline unsigned int
3084tlb0_tableidx(vm_offset_t va, unsigned int way)
3085{
3086 unsigned int idx;
3087
3088 idx = (way * TLB0_ENTRIES_PER_WAY);
3089 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
3090 return (idx);
3091}
3092
3093/*
3094 * Invalidate TLB0 entry.
3095 */ 3096static inline void 3097tlb0_flush_entry(vm_offset_t va) 3098{ 3099 3100 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 3101 3102 mtx_assert(&tlbivax_mutex, MA_OWNED); 3103 3104 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 3105 __asm __volatile("isync; msync"); 3106 __asm __volatile("tlbsync; msync"); 3107 3108 CTR1(KTR_PMAP, "%s: e", __func__); 3109} 3110 3111/* Print out contents of the MAS registers for each TLB0 entry */ 3112void 3113tlb0_print_tlbentries(void) 3114{ 3115 uint32_t mas0, mas1, mas2, mas3, mas7; 3116 int entryidx, way, idx; 3117 3118 debugf("TLB0 entries:\n"); 3119 for (way = 0; way < TLB0_WAYS; way ++) 3120 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 3121 3122 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 3123 mtspr(SPR_MAS0, mas0); 3124 __asm __volatile("isync"); 3125 3126 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 3127 mtspr(SPR_MAS2, mas2); 3128 3129 __asm __volatile("isync; tlbre"); 3130 3131 mas1 = mfspr(SPR_MAS1); 3132 mas2 = mfspr(SPR_MAS2); 3133 mas3 = mfspr(SPR_MAS3); 3134 mas7 = mfspr(SPR_MAS7); 3135 3136 idx = tlb0_tableidx(mas2, way); 3137 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 3138 } 3139} 3140 3141/**************************************************************************/ 3142/* TLB1 handling */ 3143/**************************************************************************/ 3144 3145/* 3146 * TLB1 mapping notes: 3147 * 3148 * TLB1[0] Kernel text and data. 3149 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI 3150 * windows, other devices mappings. 3151 */ 3152 3153 /* 3154 * Read an entry from given TLB1 slot. 3155 */ 3156void 3157tlb1_read_entry(tlb_entry_t *entry, unsigned int slot) 3158{ 3159 register_t msr; 3160 uint32_t mas0; 3161 3162 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__)); 3163 3164 msr = mfmsr(); 3165 __asm __volatile("wrteei 0"); 3166 3167 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot); 3168 mtspr(SPR_MAS0, mas0); 3169 __asm __volatile("isync; tlbre"); 3170 3171 entry->mas1 = mfspr(SPR_MAS1); 3172 entry->mas2 = mfspr(SPR_MAS2); 3173 entry->mas3 = mfspr(SPR_MAS3); 3174 3175 switch ((mfpvr() >> 16) & 0xFFFF) { 3176 case FSL_E500v2: 3177 case FSL_E500mc: 3178 case FSL_E5500: 3179 entry->mas7 = mfspr(SPR_MAS7); 3180 break; 3181 default: 3182 entry->mas7 = 0; 3183 break; 3184 } 3185 mtmsr(msr); 3186 3187 entry->virt = entry->mas2 & MAS2_EPN_MASK; 3188 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) | 3189 (entry->mas3 & MAS3_RPN); 3190 entry->size = 3191 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT); 3192} 3193 3194/* 3195 * Write given entry to TLB1 hardware. 3196 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 
3197 */ 3198static void 3199tlb1_write_entry(tlb_entry_t *e, unsigned int idx) 3200{ 3201 register_t msr; 3202 uint32_t mas0; 3203 3204 //debugf("tlb1_write_entry: s\n"); 3205 3206 /* Select entry */ 3207 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 3208 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 3209 3210 msr = mfmsr(); 3211 __asm __volatile("wrteei 0"); 3212 3213 mtspr(SPR_MAS0, mas0); 3214 __asm __volatile("isync"); 3215 mtspr(SPR_MAS1, e->mas1); 3216 __asm __volatile("isync"); 3217 mtspr(SPR_MAS2, e->mas2); 3218 __asm __volatile("isync"); 3219 mtspr(SPR_MAS3, e->mas3); 3220 __asm __volatile("isync"); 3221 switch ((mfpvr() >> 16) & 0xFFFF) { 3222 case FSL_E500mc: 3223 case FSL_E5500: 3224 mtspr(SPR_MAS8, 0); 3225 __asm __volatile("isync"); 3226 /* FALLTHROUGH */ 3227 case FSL_E500v2: 3228 mtspr(SPR_MAS7, e->mas7); 3229 __asm __volatile("isync"); 3230 break; 3231 default: 3232 break; 3233 } 3234 3235 __asm __volatile("tlbwe; isync; msync"); 3236 mtmsr(msr); 3237 3238 //debugf("tlb1_write_entry: e\n"); 3239} 3240 3241/* 3242 * Return the largest uint value log such that 2^log <= num. 3243 */ 3244static unsigned int 3245ilog2(unsigned int num) 3246{ 3247 int lz; 3248 3249 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 3250 return (31 - lz); 3251} 3252 3253/* 3254 * Convert TLB TSIZE value to mapped region size. 3255 */ 3256static vm_size_t 3257tsize2size(unsigned int tsize) 3258{ 3259 3260 /* 3261 * size = 4^tsize KB 3262 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 3263 */ 3264 3265 return ((1 << (2 * tsize)) * 1024); 3266} 3267 3268/* 3269 * Convert region size (must be power of 4) to TLB TSIZE value. 3270 */ 3271static unsigned int 3272size2tsize(vm_size_t size) 3273{ 3274 3275 return (ilog2(size) / 2 - 5); 3276} 3277 3278/* 3279 * Register permanent kernel mapping in TLB1. 3280 * 3281 * Entries are created starting from index 0 (current free entry is 3282 * kept in tlb1_idx) and are not supposed to be invalidated. 3283 */ 3284int 3285tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size, 3286 uint32_t flags) 3287{ 3288 tlb_entry_t e; 3289 uint32_t ts, tid; 3290 int tsize, index; 3291 3292 for (index = 0; index < TLB1_ENTRIES; index++) { 3293 tlb1_read_entry(&e, index); 3294 if ((e.mas1 & MAS1_VALID) == 0) 3295 break; 3296 /* Check if we're just updating the flags, and update them. */ 3297 if (e.phys == pa && e.virt == va && e.size == size) { 3298 e.mas2 = (va & MAS2_EPN_MASK) | flags; 3299 tlb1_write_entry(&e, index); 3300 return (0); 3301 } 3302 } 3303 if (index >= TLB1_ENTRIES) { 3304 printf("tlb1_set_entry: TLB1 full!\n"); 3305 return (-1); 3306 } 3307 3308 /* Convert size to TSIZE */ 3309 tsize = size2tsize(size); 3310 3311 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 3312 /* XXX TS is hard coded to 0 for now as we only use single address space */ 3313 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 3314 3315 e.phys = pa; 3316 e.virt = va; 3317 e.size = size; 3318 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 3319 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 3320 e.mas2 = (va & MAS2_EPN_MASK) | flags; 3321 3322 /* Set supervisor RWX permission bits */ 3323 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 3324 e.mas7 = (pa >> 32) & MAS7_RPN; 3325 3326 tlb1_write_entry(&e, index); 3327 3328 /* 3329 * XXX in general TLB1 updates should be propagated between CPUs, 3330 * since current design assumes to have the same TLB1 set-up on all 3331 * cores. 
3332 */ 3333 return (0); 3334} 3335 3336/* 3337 * Map in contiguous RAM region into the TLB1 using maximum of 3338 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 3339 * 3340 * If necessary round up last entry size and return total size 3341 * used by all allocated entries. 3342 */ 3343vm_size_t 3344tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 3345{ 3346 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; 3347 vm_size_t mapped, pgsz, base, mask; 3348 int idx, nents; 3349 3350 /* Round up to the next 1M */ 3351 size = roundup2(size, 1 << 20); 3352 3353 mapped = 0; 3354 idx = 0; 3355 base = va; 3356 pgsz = 64*1024*1024; 3357 while (mapped < size) { 3358 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { 3359 while (pgsz > (size - mapped)) 3360 pgsz >>= 2; 3361 pgs[idx++] = pgsz; 3362 mapped += pgsz; 3363 } 3364 3365 /* We under-map. Correct for this. */ 3366 if (mapped < size) { 3367 while (pgs[idx - 1] == pgsz) { 3368 idx--; 3369 mapped -= pgsz; 3370 } 3371 /* XXX We may increase beyond out starting point. */ 3372 pgsz <<= 2; 3373 pgs[idx++] = pgsz; 3374 mapped += pgsz; 3375 } 3376 } 3377 3378 nents = idx; 3379 mask = pgs[0] - 1; 3380 /* Align address to the boundary */ 3381 if (va & mask) { 3382 va = (va + mask) & ~mask; 3383 pa = (pa + mask) & ~mask; 3384 } 3385 3386 for (idx = 0; idx < nents; idx++) { 3387 pgsz = pgs[idx]; 3388 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz); 3389 tlb1_set_entry(va, pa, pgsz, 3390 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM); 3391 pa += pgsz; 3392 va += pgsz; 3393 } 3394 3395 mapped = (va - base); 3396#ifdef __powerpc64__ 3397 printf("mapped size 0x%016lx (wasted space 0x%16lx)\n", 3398#else 3399 printf("mapped size 0x%08x (wasted space 0x%08x)\n", 3400#endif 3401 mapped, mapped - size); 3402 return (mapped); 3403} 3404 3405/* 3406 * TLB1 initialization routine, to be called after the very first 3407 * assembler level setup done in locore.S. 3408 */ 3409void 3410tlb1_init() 3411{ 3412 uint32_t mas0, mas1, mas2, mas3, mas7; 3413 uint32_t tsz; 3414 3415 tlb1_get_tlbconf(); 3416 3417 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0); 3418 mtspr(SPR_MAS0, mas0); 3419 __asm __volatile("isync; tlbre"); 3420 3421 mas1 = mfspr(SPR_MAS1); 3422 mas2 = mfspr(SPR_MAS2); 3423 mas3 = mfspr(SPR_MAS3); 3424 mas7 = mfspr(SPR_MAS7); 3425 3426 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) | 3427 (mas3 & MAS3_RPN); 3428 3429 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3430 kernsize += (tsz > 0) ? tsize2size(tsz) : 0; 3431 3432 /* Setup TLB miss defaults */ 3433 set_mas4_defaults(); 3434} 3435 3436vm_offset_t 3437pmap_early_io_map(vm_paddr_t pa, vm_size_t size) 3438{ 3439 vm_paddr_t pa_base; 3440 vm_offset_t va, sz; 3441 int i; 3442 tlb_entry_t e; 3443 3444 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!")); 3445 3446 for (i = 0; i < TLB1_ENTRIES; i++) { 3447 tlb1_read_entry(&e, i); 3448 if (!(e.mas1 & MAS1_VALID)) 3449 continue; 3450 if (pa >= e.phys && (pa + size) <= 3451 (e.phys + e.size)) 3452 return (e.virt + (pa - e.phys)); 3453 } 3454 3455 pa_base = rounddown(pa, PAGE_SIZE); 3456 size = roundup(size + (pa - pa_base), PAGE_SIZE); 3457 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1)); 3458 va = tlb1_map_base + (pa - pa_base); 3459 3460 do { 3461 sz = 1 << (ilog2(size) & ~1); 3462 tlb1_set_entry(tlb1_map_base, pa_base, sz, 3463 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO); 3464 size -= sz; 3465 pa_base += sz; 3466 tlb1_map_base += sz; 3467 } while (size > 0); 3468 3469 return (va); 3470} 3471 3472/* 3473 * Setup MAS4 defaults. 
3474 * These values are loaded to MAS0-2 on a TLB miss. 3475 */ 3476static void 3477set_mas4_defaults(void) 3478{ 3479 uint32_t mas4; 3480 3481 /* Defaults: TLB0, PID0, TSIZED=4K */ 3482 mas4 = MAS4_TLBSELD0; 3483 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3484#ifdef SMP 3485 mas4 |= MAS4_MD; 3486#endif 3487 mtspr(SPR_MAS4, mas4); 3488 __asm __volatile("isync"); 3489} 3490 3491/* 3492 * Print out contents of the MAS registers for each TLB1 entry 3493 */ 3494void 3495tlb1_print_tlbentries(void) 3496{ 3497 uint32_t mas0, mas1, mas2, mas3, mas7; 3498 int i; 3499 3500 debugf("TLB1 entries:\n"); 3501 for (i = 0; i < TLB1_ENTRIES; i++) { 3502 3503 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3504 mtspr(SPR_MAS0, mas0); 3505 3506 __asm __volatile("isync; tlbre"); 3507 3508 mas1 = mfspr(SPR_MAS1); 3509 mas2 = mfspr(SPR_MAS2); 3510 mas3 = mfspr(SPR_MAS3); 3511 mas7 = mfspr(SPR_MAS7); 3512 3513 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3514 } 3515} 3516 3517/* 3518 * Return 0 if the physical IO range is encompassed by one of the 3519 * the TLB1 entries, otherwise return related error code. 3520 */ 3521static int 3522tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3523{ 3524 uint32_t prot; 3525 vm_paddr_t pa_start; 3526 vm_paddr_t pa_end; 3527 unsigned int entry_tsize; 3528 vm_size_t entry_size; 3529 tlb_entry_t e; 3530 3531 *va = (vm_offset_t)NULL; 3532 3533 tlb1_read_entry(&e, i); 3534 /* Skip invalid entries */ 3535 if (!(e.mas1 & MAS1_VALID)) 3536 return (EINVAL); 3537 3538 /* 3539 * The entry must be cache-inhibited, guarded, and r/w 3540 * so it can function as an i/o page 3541 */ 3542 prot = e.mas2 & (MAS2_I | MAS2_G); 3543 if (prot != (MAS2_I | MAS2_G)) 3544 return (EPERM); 3545 3546 prot = e.mas3 & (MAS3_SR | MAS3_SW); 3547 if (prot != (MAS3_SR | MAS3_SW)) 3548 return (EPERM); 3549 3550 /* The address should be within the entry range. */ 3551 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3552 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3553 3554 entry_size = tsize2size(entry_tsize); 3555 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) | 3556 (e.mas3 & MAS3_RPN); 3557 pa_end = pa_start + entry_size; 3558 3559 if ((pa < pa_start) || ((pa + size) > pa_end)) 3560 return (ERANGE); 3561 3562 /* Return virtual address of this mapping. */ 3563 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3564 return (0); 3565} 3566 3567/* 3568 * Invalidate all TLB0 entries which match the given TID. Note this is 3569 * dedicated for cases when invalidations should NOT be propagated to other 3570 * CPUs. 3571 */ 3572static void 3573tid_flush(tlbtid_t tid) 3574{ 3575 register_t msr; 3576 uint32_t mas0, mas1, mas2; 3577 int entry, way; 3578 3579 3580 /* Don't evict kernel translations */ 3581 if (tid == TID_KERNEL) 3582 return; 3583 3584 msr = mfmsr(); 3585 __asm __volatile("wrteei 0"); 3586 3587 for (way = 0; way < TLB0_WAYS; way++) 3588 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) { 3589 3590 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 3591 mtspr(SPR_MAS0, mas0); 3592 __asm __volatile("isync"); 3593 3594 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT; 3595 mtspr(SPR_MAS2, mas2); 3596 3597 __asm __volatile("isync; tlbre"); 3598 3599 mas1 = mfspr(SPR_MAS1); 3600 3601 if (!(mas1 & MAS1_VALID)) 3602 continue; 3603 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid) 3604 continue; 3605 mas1 &= ~MAS1_VALID; 3606 mtspr(SPR_MAS1, mas1); 3607 __asm __volatile("isync; tlbwe; isync; msync"); 3608 } 3609 mtmsr(msr); 3610} 3611
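/*
 * Illustrative sketch (not part of the source above and not compiled into
 * the kernel): a standalone userland check of the TSIZE arithmetic used by
 * tsize2size() and size2tsize(), where a TLB entry of TSIZE t maps 4^t KB,
 * i.e. 2^(2 * t + 10) bytes. The demo_* names are hypothetical and exist
 * only for this example.
 */
#if 0
#include <stdio.h>

static unsigned long
demo_tsize2size(unsigned int tsize)
{

	/* size = 4^tsize KB = 2^(2 * tsize + 10) bytes. */
	return ((1UL << (2 * tsize)) * 1024);
}

static unsigned int
demo_size2tsize(unsigned long size)
{
	unsigned int log = 0;

	/*
	 * Smallest log with 2^log >= size; for power-of-4 KB sizes this is
	 * exact and tsize = log / 2 - 5.
	 */
	while ((1UL << log) < size)
		log++;
	return (log / 2 - 5);
}

int
main(void)
{
	unsigned int t;

	for (t = 1; t <= 9; t++)
		printf("tsize %u -> %lu bytes -> tsize %u\n", t,
		    demo_tsize2size(t), demo_size2tsize(demo_tsize2size(t)));
	return (0);
}
#endif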
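/*
 * Illustrative sketch (hypothetical, userland only) of the round-robin TID
 * allocation with stealing that tid_alloc() performs per CPU, minus the
 * locking, the per-CPU state and the TLB0 flush of the stolen TID. The
 * demo_* names and the tiny TID range are made up for the example.
 */
#if 0
#include <stdio.h>

#define DEMO_TID_MIN	2
#define DEMO_TID_MAX	5	/* tiny TID space so stealing shows up quickly */

static int demo_owner[DEMO_TID_MAX + 1];	/* pmap id owning each TID, 0 = free */
static int demo_tid_next = DEMO_TID_MIN;

static int
demo_tid_alloc(int pmap_id)
{
	int tid;

	/* Round-robin: take the next TID, wrapping back to the minimum. */
	tid = demo_tid_next;
	if (tid > DEMO_TID_MAX)
		tid = DEMO_TID_MIN;
	demo_tid_next = tid + 1;

	/* If the TID is busy, evict ("steal" from) its previous owner. */
	if (demo_owner[tid] != 0)
		printf("stealing tid %d from pmap %d\n", tid, demo_owner[tid]);
	demo_owner[tid] = pmap_id;
	return (tid);
}

int
main(void)
{
	int i;

	for (i = 1; i <= 6; i++)
		printf("pmap %d got tid %d\n", i, demo_tid_alloc(i));
	return (0);
}
#endif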
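/*
 * Illustrative sketch (hypothetical, userland only) of the page-size
 * selection loop in tlb1_mapin_region(): page sizes shrink by factors of
 * four to fit the remaining region, and once the entry budget is spent the
 * equal-sized tail is merged into one four-times-larger entry, which may
 * over-map (the "round up last entry size" case). DEMO_MAX_ENTRIES and the
 * 96MB region stand in for KERNEL_REGION_MAX_TLB_ENTRIES and real RAM.
 */
#if 0
#include <stdio.h>

#define DEMO_MAX_ENTRIES	2	/* stands in for KERNEL_REGION_MAX_TLB_ENTRIES */

int
main(void)
{
	unsigned long pgs[DEMO_MAX_ENTRIES + 1];
	unsigned long size = 96UL * 1024 * 1024;	/* hypothetical 96MB region */
	unsigned long mapped = 0, pgsz = 64UL * 1024 * 1024;
	int idx = 0, i;

	while (mapped < size) {
		/* Fill entries, shrinking the page size to fit what is left. */
		while (mapped < size && idx < DEMO_MAX_ENTRIES) {
			while (pgsz > (size - mapped))
				pgsz >>= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
		/* Out of entries: merge the equal-sized tail into one 4x page. */
		if (mapped < size) {
			while (pgs[idx - 1] == pgsz) {
				idx--;
				mapped -= pgsz;
			}
			pgsz <<= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
	}
	for (i = 0; i < idx; i++)
		printf("entry %d: %lu MB\n", i, pgs[i] >> 20);
	printf("mapped %lu MB for a %lu MB request\n", mapped >> 20, size >> 20);
	return (0);
}
#endif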