/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t bp_ntlb1s;
#endif

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;
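/*
 * Note: of the three kernel image globals above, kernload is a physical
 * address (where the loader placed the image), kernstart is the matching
 * virtual address, and kernsize is the amount of VA actually backed by
 * static TLB1 translations; mmu_booke_bootstrap() may grow kernsize when
 * the bootstrap data spills past the initially mapped region.
 */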
/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
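/*
 * Illustrative sketch (not part of the original code): the pte_*() and
 * ptbl_*() helpers below implement a two-level lookup that splits a 32-bit
 * VA into a page directory index, a page table index and a page offset,
 * using the PDIR_IDX/PTBL_IDX macros from <machine/pte.h>:
 *
 *	static pte_t *
 *	pte_lookup_sketch(pmap_t pmap, vm_offset_t va)
 *	{
 *		pte_t *ptbl = pmap->pm_pdir[PDIR_IDX(va)];
 *
 *		return ((ptbl == NULL) ? NULL : &ptbl[PTBL_IDX(va)]);
 *	}
 *
 * This is the same idiom pte_find() implements; the sketch only makes the
 * index arithmetic explicit.
 */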
/*
 * Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES).
 */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);
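/*
 * Sizing note: the ptbl buf pool spans PTBL_BUFS * PTBL_PAGES * PAGE_SIZE
 * bytes of KVA. With PTBL_BUFS = 2048 as defined above, and assuming the
 * usual PTBL_PAGES = 2 and 4 KB pages, that is 16 MB, which matches the
 * "reserved for ptbl bufs" window (0xc100_4000 - 0xc200_3fff) in the VM
 * layout notes at the top of this file.
 */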
/*
 * Kernel MMU interface
 */
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static int		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void		*mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void		mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
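/*
 * Note: mmu_booke_methods is a kobj method table; MMU_DEF() registers this
 * implementation under the name "booke_mmu" so platform code can select it
 * at boot. pmap_*() calls are then dispatched through the mmu_if.h glue to
 * the matching mmu_booke_*() functions above.
 */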
static __inline uint32_t
tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t attrib;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	attrib = _TLB_ENTRY_IO;
	for (i = 0; i < physmem_regions_sz; i++) {
		if ((pa >= physmem_regions[i].mr_start) &&
		    (pa < (physmem_regions[i].mr_start +
		     physmem_regions[i].mr_size))) {
			attrib = _TLB_ENTRY_MEM;
			break;
		}
	}

	return (attrib);
}

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

/* Read TLB0 configuration (number of entries, associativity) from h/w. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
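/*
 * Worked example (hardware-dependent, for illustration only): on a core
 * whose TLB0CFG reports 512 entries with 4-way associativity, e.g. e500v2,
 * the code above yields tlb0_entries = 512, tlb0_ways = 4 and
 * tlb0_entries_per_way = 128. These globals feed tlb0_tableidx() when
 * individual TLB0 entries are flushed.
 */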
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs and free the buf that
 * holds the given ptbl.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i, j;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages; this may sleep unless 'nosleep' is set. */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			if (nosleep) {
				ptbl_free_pmap_ptbl(pmap, ptbl);
				for (j = 0; j < i; j++)
					vm_page_free(mtbl[j]);
				atomic_subtract_int(&cnt.v_wire_count, i);
				return (NULL);
			}
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}
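/*
 * Note: the "hold count" manipulated by ptbl_hold()/ptbl_unhold() below is
 * not a dedicated field; the code reuses the wire_count of the vm_pages
 * backing a ptbl. Every PTE insertion bumps the count on all PTBL_PAGES
 * pages and every PTE removal drops it, so once the count reaches zero the
 * ptbl holds no valid PTEs and is torn down via ptbl_free().
 */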
/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}
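/*
 * Usage sketch (for illustration): a managed mapping is tracked by one
 * pv_entry per (pmap, va) pair linked off the vm_page:
 *
 *	pv_insert(pmap, va, m);		(after the PTE is entered)
 *	...
 *	pv_remove(pmap, va, m);		(when the PTE is torn down)
 *
 * Callers must hold the pmap lock and the global pv list write lock; the
 * assertions in pv_insert()/pv_remove() enforce this.
 */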
/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//	su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
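/*
 * Note on the nosleep contract (for illustration): a caller that must not
 * sleep (e.g. a PMAP_ENTER_NOSLEEP-style pmap_enter() request) passes
 * nosleep = TRUE, which pte_enter() forwards to ptbl_alloc(); instead of
 * blocking in VM_WAIT, ptbl_alloc() then backs out any partially allocated
 * ptbl pages and returns NULL, and pte_enter() converts that into ENOMEM.
 * A sleeping caller never sees this error path.
 */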
/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 * Also note that "start - 1" is deliberate. With SMP, the
	 * entry point is exactly a page from the actual load address.
	 * As such, trunc_page() has no effect and we're off by a page.
	 * Since we always have the ELF header between the load address
	 * and the entry point, we can safely subtract 1 to compensate.
	 */
	kernstart = trunc_page(start - 1);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
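	/*
	 * Recap of the bootstrap allocations above: the dynamic per-CPU
	 * area, the message buffer, the ptbl_buf descriptors and the kernel
	 * page directory were carved out of data_end in that order, the
	 * static TLB1 mapping was extended to cover them, and
	 * virtual_avail/virtual_end now delimit the KVA range that the
	 * allocations below draw from.
	 */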
	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%08x\n", kernload);
	debugf(" kernstart = 0x%08x\n", kernstart);
	debugf(" kernsize = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);
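	/*
	 * Worked example (for illustration): with a single avail region
	 * 0x0000_0000 - 0x2000_0000 (512 MB) and the tunable hw.physmem set
	 * to 256 MB, the loop above emits one phys_avail pair covering
	 * 0x0000_0000 - 0x1000_0000, clamps physsz to hwphyssz and stops
	 * scanning further regions.
	 */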
	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
	int i;

	/* Check TLB1 mappings */
	for (i = 0; i < tlb1_idx; i++) {
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;
		if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
			return (tlb1[i].phys + (va - tlb1[i].virt));
	}

	return (pte_vatopa(mmu, kernel_pmap, va));
}
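/*
 * Note: mmu_booke_kextract() scans the static TLB1 entries before falling
 * back to the kernel page tables because large mappings such as
 * pmap_mapdev()-ed device ranges live only in TLB1 and have no PTEs; the
 * kernel code/data range is the exception, being mirrored into PTEs by the
 * bootstrap loop above precisely so that pte_vatopa() works for it.
 */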
1385176771Sraj */ 1386176771Srajstatic void 1387176771Srajmmu_booke_init(mmu_t mmu) 1388176771Sraj{ 1389176771Sraj int shpgperproc = PMAP_SHPGPERPROC; 1390176771Sraj 1391176771Sraj /* 1392176771Sraj * Initialize the address space (zone) for the pv entries. Set a 1393176771Sraj * high water mark so that the system can recover from excessive 1394176771Sraj * numbers of pv entries. 1395176771Sraj */ 1396176771Sraj pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1397176771Sraj NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1398176771Sraj 1399176771Sraj TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1400176771Sraj pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1401176771Sraj 1402176771Sraj TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1403176771Sraj pv_entry_high_water = 9 * (pv_entry_max / 10); 1404176771Sraj 1405247360Sattilio uma_zone_reserve_kva(pvzone, pv_entry_max); 1406176771Sraj 1407176771Sraj /* Pre-fill pvzone with initial number of pv entries. */ 1408176771Sraj uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1409176771Sraj 1410176771Sraj /* Initialize ptbl allocation. */ 1411176771Sraj ptbl_init(); 1412176771Sraj} 1413176771Sraj 1414176771Sraj/* 1415176771Sraj * Map a list of wired pages into kernel virtual address space. This is 1416176771Sraj * intended for temporary mappings which do not need page modification or 1417176771Sraj * references recorded. Existing mappings in the region are overwritten. 1418176771Sraj */ 1419176771Srajstatic void 1420176771Srajmmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1421176771Sraj{ 1422176771Sraj vm_offset_t va; 1423176771Sraj 1424176771Sraj va = sva; 1425176771Sraj while (count-- > 0) { 1426176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1427176771Sraj va += PAGE_SIZE; 1428176771Sraj m++; 1429176771Sraj } 1430176771Sraj} 1431176771Sraj 1432176771Sraj/* 1433176771Sraj * Remove page mappings from kernel virtual address space. Intended for 1434176771Sraj * temporary mappings entered by mmu_booke_qenter. 1435176771Sraj */ 1436176771Srajstatic void 1437176771Srajmmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1438176771Sraj{ 1439176771Sraj vm_offset_t va; 1440176771Sraj 1441176771Sraj va = sva; 1442176771Sraj while (count-- > 0) { 1443176771Sraj mmu_booke_kremove(mmu, va); 1444176771Sraj va += PAGE_SIZE; 1445176771Sraj } 1446176771Sraj} 1447176771Sraj 1448176771Sraj/* 1449176771Sraj * Map a wired page into kernel virtual address space. 
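 *
 * Callers needing a contiguous range typically just loop one page at a
 * time, as mmu_booke_map() does further below; a minimal sketch:
 *
 *	for (; pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE)
 *		mmu_booke_kenter(mmu, va, pa);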
1450176771Sraj */ 1451176771Srajstatic void 1452235936Srajmmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1453176771Sraj{ 1454265996Sian 1455265996Sian mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1456265996Sian} 1457265996Sian 1458265996Sianstatic void 1459265996Sianmmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1460265996Sian{ 1461176771Sraj unsigned int pdir_idx = PDIR_IDX(va); 1462176771Sraj unsigned int ptbl_idx = PTBL_IDX(va); 1463187151Sraj uint32_t flags; 1464176771Sraj pte_t *pte; 1465176771Sraj 1466187151Sraj KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1467187151Sraj (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1468176771Sraj 1469265996Sian flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1470265996Sian flags |= tlb_calc_wimg(pa, ma); 1471176771Sraj 1472176771Sraj pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1473176771Sraj 1474187149Sraj mtx_lock_spin(&tlbivax_mutex); 1475192532Sraj tlb_miss_lock(); 1476187149Sraj 1477176771Sraj if (PTE_ISVALID(pte)) { 1478187149Sraj 1479187149Sraj CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1480176771Sraj 1481176771Sraj /* Flush entry from TLB0 */ 1482187149Sraj tlb0_flush_entry(va); 1483176771Sraj } 1484176771Sraj 1485176771Sraj pte->rpn = pa & ~PTE_PA_MASK; 1486176771Sraj pte->flags = flags; 1487176771Sraj 1488176771Sraj //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1489176771Sraj // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1490176771Sraj // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1491176771Sraj 1492176771Sraj /* Flush the real memory from the instruction cache. */ 1493176771Sraj if ((flags & (PTE_I | PTE_G)) == 0) { 1494176771Sraj __syncicache((void *)va, PAGE_SIZE); 1495176771Sraj } 1496176771Sraj 1497192532Sraj tlb_miss_unlock(); 1498187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1499176771Sraj} 1500176771Sraj 1501176771Sraj/* 1502176771Sraj * Remove a page from kernel page table. 1503176771Sraj */ 1504176771Srajstatic void 1505176771Srajmmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1506176771Sraj{ 1507176771Sraj unsigned int pdir_idx = PDIR_IDX(va); 1508176771Sraj unsigned int ptbl_idx = PTBL_IDX(va); 1509176771Sraj pte_t *pte; 1510176771Sraj 1511187149Sraj// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1512176771Sraj 1513187149Sraj KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1514187149Sraj (va <= VM_MAX_KERNEL_ADDRESS)), 1515176771Sraj ("mmu_booke_kremove: invalid va")); 1516176771Sraj 1517176771Sraj pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1518176771Sraj 1519176771Sraj if (!PTE_ISVALID(pte)) { 1520187149Sraj 1521187149Sraj CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1522187149Sraj 1523176771Sraj return; 1524176771Sraj } 1525176771Sraj 1526187149Sraj mtx_lock_spin(&tlbivax_mutex); 1527192532Sraj tlb_miss_lock(); 1528176771Sraj 1529187149Sraj /* Invalidate entry in TLB0, update PTE. */ 1530187149Sraj tlb0_flush_entry(va); 1531176771Sraj pte->flags = 0; 1532176771Sraj pte->rpn = 0; 1533176771Sraj 1534192532Sraj tlb_miss_unlock(); 1535187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1536176771Sraj} 1537176771Sraj 1538176771Sraj/* 1539176771Sraj * Initialize pmap associated with process 0. 
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	/* Clear this pmap's active set, not the kernel pmap's. */
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified, the page
 * will be wired down.
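 *
 * The flags argument carries PMAP_ENTER_* bits.  A hypothetical caller
 * wiring a page without sleeping would look roughly like:
 *
 *	error = mmu_booke_enter(mmu, pmap, va, m,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    PMAP_ENTER_WIRED | PMAP_ENTER_NOSLEEP, 0);
 *
 * and must cope with KERN_RESOURCE_SHORTAGE when NOSLEEP is set.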
1590176771Sraj */ 1591270439Skibstatic int 1592176771Srajmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1593270439Skib vm_prot_t prot, u_int flags, int8_t psind) 1594176771Sraj{ 1595270439Skib int error; 1596187151Sraj 1597242535Salc rw_wlock(&pvh_global_lock); 1598176771Sraj PMAP_LOCK(pmap); 1599270439Skib error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind); 1600242535Salc rw_wunlock(&pvh_global_lock); 1601176771Sraj PMAP_UNLOCK(pmap); 1602270439Skib return (error); 1603176771Sraj} 1604176771Sraj 1605270439Skibstatic int 1606176771Srajmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1607270439Skib vm_prot_t prot, u_int pmap_flags, int8_t psind __unused) 1608176771Sraj{ 1609176771Sraj pte_t *pte; 1610176771Sraj vm_paddr_t pa; 1611187151Sraj uint32_t flags; 1612270439Skib int error, su, sync; 1613176771Sraj 1614176771Sraj pa = VM_PAGE_TO_PHYS(m); 1615176771Sraj su = (pmap == kernel_pmap); 1616176771Sraj sync = 0; 1617176771Sraj 1618176771Sraj //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1619270439Skib // "pa=0x%08x prot=0x%08x flags=%#x)\n", 1620176771Sraj // (u_int32_t)pmap, su, pmap->pm_tid, 1621270439Skib // (u_int32_t)m, va, pa, prot, flags); 1622176771Sraj 1623176771Sraj if (su) { 1624187151Sraj KASSERT(((va >= virtual_avail) && 1625187151Sraj (va <= VM_MAX_KERNEL_ADDRESS)), 1626187151Sraj ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1627176771Sraj } else { 1628176771Sraj KASSERT((va <= VM_MAXUSER_ADDRESS), 1629187151Sraj ("mmu_booke_enter_locked: user pmap, non user va")); 1630176771Sraj } 1631254138Sattilio if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1632250747Salc VM_OBJECT_ASSERT_LOCKED(m->object); 1633176771Sraj 1634176771Sraj PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1635176771Sraj 1636176771Sraj /* 1637176771Sraj * If there is an existing mapping, and the physical address has not 1638176771Sraj * changed, must be protection or wiring change. 1639176771Sraj */ 1640176771Sraj if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1641176771Sraj (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1642187149Sraj 1643187149Sraj /* 1644187149Sraj * Before actually updating pte->flags we calculate and 1645187149Sraj * prepare its new value in a helper var. 1646187149Sraj */ 1647187149Sraj flags = pte->flags; 1648187149Sraj flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1649176771Sraj 1650176771Sraj /* Wiring change, just update stats. */ 1651270439Skib if ((pmap_flags & PMAP_ENTER_WIRED) != 0) { 1652176771Sraj if (!PTE_ISWIRED(pte)) { 1653187149Sraj flags |= PTE_WIRED; 1654176771Sraj pmap->pm_stats.wired_count++; 1655176771Sraj } 1656176771Sraj } else { 1657176771Sraj if (PTE_ISWIRED(pte)) { 1658187149Sraj flags &= ~PTE_WIRED; 1659176771Sraj pmap->pm_stats.wired_count--; 1660176771Sraj } 1661176771Sraj } 1662176771Sraj 1663176771Sraj if (prot & VM_PROT_WRITE) { 1664176771Sraj /* Add write permissions. */ 1665187149Sraj flags |= PTE_SW; 1666176771Sraj if (!su) 1667187149Sraj flags |= PTE_UW; 1668192795Sraj 1669208846Salc if ((flags & PTE_MANAGED) != 0) 1670225418Skib vm_page_aflag_set(m, PGA_WRITEABLE); 1671176771Sraj } else { 1672176771Sraj /* Handle modified pages, sense modify status. 
			 */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it, so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, the icache
			 * should be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update the stats. */
		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
			flags |= PTE_WIRED;

		error = pte_enter(mmu, pmap, m, va, flags,
		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
		if (error != 0)
			return (KERN_RESOURCE_SHORTAGE);

		/* Test the PTE flag set above, not a PMAP_ENTER_* bit. */
		if ((flags & PTE_WIRED) != 0)
			pmap->pm_stats.wired_count++;

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}

	return (KERN_SUCCESS);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.
This page is 1774176771Sraj * mapped at the given virtual address start. Each subsequent page is 1775176771Sraj * mapped at a virtual address that is offset from start by the same 1776176771Sraj * amount as the page is offset from m_start within the object. The 1777176771Sraj * last page in the sequence is the page with the largest offset from 1778176771Sraj * m_start that can be mapped at a virtual address less than the given 1779176771Sraj * virtual address end. Not every virtual page between start and end 1780176771Sraj * is mapped; only those for which a resident page exists with the 1781176771Sraj * corresponding offset from m_start are mapped. 1782176771Sraj */ 1783176771Srajstatic void 1784176771Srajmmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1785176771Sraj vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1786176771Sraj{ 1787176771Sraj vm_page_t m; 1788176771Sraj vm_pindex_t diff, psize; 1789176771Sraj 1790250884Sattilio VM_OBJECT_ASSERT_LOCKED(m_start->object); 1791250884Sattilio 1792176771Sraj psize = atop(end - start); 1793176771Sraj m = m_start; 1794242535Salc rw_wlock(&pvh_global_lock); 1795176771Sraj PMAP_LOCK(pmap); 1796176771Sraj while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1797187151Sraj mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1798270439Skib prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1799270439Skib PMAP_ENTER_NOSLEEP, 0); 1800176771Sraj m = TAILQ_NEXT(m, listq); 1801176771Sraj } 1802242535Salc rw_wunlock(&pvh_global_lock); 1803176771Sraj PMAP_UNLOCK(pmap); 1804176771Sraj} 1805176771Sraj 1806176771Srajstatic void 1807176771Srajmmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1808176771Sraj vm_prot_t prot) 1809176771Sraj{ 1810176771Sraj 1811242535Salc rw_wlock(&pvh_global_lock); 1812176771Sraj PMAP_LOCK(pmap); 1813176771Sraj mmu_booke_enter_locked(mmu, pmap, va, m, 1814270439Skib prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 1815270439Skib 0); 1816242535Salc rw_wunlock(&pvh_global_lock); 1817176771Sraj PMAP_UNLOCK(pmap); 1818176771Sraj} 1819176771Sraj 1820176771Sraj/* 1821176771Sraj * Remove the given range of addresses from the specified map. 1822176771Sraj * 1823176771Sraj * It is assumed that the start and end are properly rounded to the page size. 
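 *
 * For instance, removing a single page would look like (sketch only):
 *
 *	mmu_booke_remove(mmu, pmap, trunc_page(addr),
 *	    trunc_page(addr) + PAGE_SIZE);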
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//    su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	rw_wunlock(&pvh_global_lock);

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove the physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	rw_wlock(&pvh_global_lock);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//    sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}
	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}

/*
 * The pmap must be activated before its address space can be accessed in any
 * way.
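 *
 * Activation is driven by the scheduler on context switch; its effect
 * boils down to the following simplified sketch of the code below:
 *
 *	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
 *	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
 *
 * i.e. subsequent translations on this CPU are tagged with the pmap's TID.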
1915176771Sraj */ 1916176771Srajstatic void 1917176771Srajmmu_booke_activate(mmu_t mmu, struct thread *td) 1918176771Sraj{ 1919176771Sraj pmap_t pmap; 1920223758Sattilio u_int cpuid; 1921176771Sraj 1922176771Sraj pmap = &td->td_proc->p_vmspace->vm_pmap; 1923176771Sraj 1924187149Sraj CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1925187149Sraj __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1926176771Sraj 1927176771Sraj KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1928176771Sraj 1929266001Sian sched_pin(); 1930176771Sraj 1931223758Sattilio cpuid = PCPU_GET(cpuid); 1932223758Sattilio CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1933176771Sraj PCPU_SET(curpmap, pmap); 1934187149Sraj 1935223758Sattilio if (pmap->pm_tid[cpuid] == TID_NONE) 1936176771Sraj tid_alloc(pmap); 1937176771Sraj 1938176771Sraj /* Load PID0 register with pmap tid value. */ 1939223758Sattilio mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1940187149Sraj __asm __volatile("isync"); 1941176771Sraj 1942266001Sian sched_unpin(); 1943176771Sraj 1944187149Sraj CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1945187149Sraj pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1946176771Sraj} 1947176771Sraj 1948176771Sraj/* 1949176771Sraj * Deactivate the specified process's address space. 1950176771Sraj */ 1951176771Srajstatic void 1952176771Srajmmu_booke_deactivate(mmu_t mmu, struct thread *td) 1953176771Sraj{ 1954176771Sraj pmap_t pmap; 1955176771Sraj 1956176771Sraj pmap = &td->td_proc->p_vmspace->vm_pmap; 1957187149Sraj 1958187149Sraj CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1959187149Sraj __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1960187149Sraj 1961223758Sattilio CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1962176771Sraj PCPU_SET(curpmap, NULL); 1963176771Sraj} 1964176771Sraj 1965176771Sraj/* 1966176771Sraj * Copy the range specified by src_addr/len 1967176771Sraj * from the source map to the range dst_addr/len 1968176771Sraj * in the destination map. 1969176771Sraj * 1970176771Sraj * This routine is only advisory and need not do anything. 1971176771Sraj */ 1972176771Srajstatic void 1973194101Srajmmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1974194101Sraj vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1975176771Sraj{ 1976176771Sraj 1977176771Sraj} 1978176771Sraj 1979176771Sraj/* 1980176771Sraj * Set the physical protection on the specified range of this map as requested. 1981176771Sraj */ 1982176771Srajstatic void 1983176771Srajmmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1984176771Sraj vm_prot_t prot) 1985176771Sraj{ 1986176771Sraj vm_offset_t va; 1987176771Sraj vm_page_t m; 1988176771Sraj pte_t *pte; 1989176771Sraj 1990176771Sraj if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1991176771Sraj mmu_booke_remove(mmu, pmap, sva, eva); 1992176771Sraj return; 1993176771Sraj } 1994176771Sraj 1995176771Sraj if (prot & VM_PROT_WRITE) 1996176771Sraj return; 1997176771Sraj 1998176771Sraj PMAP_LOCK(pmap); 1999176771Sraj for (va = sva; va < eva; va += PAGE_SIZE) { 2000176771Sraj if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2001176771Sraj if (PTE_ISVALID(pte)) { 2002176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2003176771Sraj 2004187149Sraj mtx_lock_spin(&tlbivax_mutex); 2005192532Sraj tlb_miss_lock(); 2006187149Sraj 2007176771Sraj /* Handle modified pages. 
*/ 2008207437Salc if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 2009178626Smarcel vm_page_dirty(m); 2010176771Sraj 2011187149Sraj tlb0_flush_entry(va); 2012207437Salc pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2013187149Sraj 2014192532Sraj tlb_miss_unlock(); 2015187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2016176771Sraj } 2017176771Sraj } 2018176771Sraj } 2019176771Sraj PMAP_UNLOCK(pmap); 2020176771Sraj} 2021176771Sraj 2022176771Sraj/* 2023176771Sraj * Clear the write and modified bits in each of the given page's mappings. 2024176771Sraj */ 2025176771Srajstatic void 2026176771Srajmmu_booke_remove_write(mmu_t mmu, vm_page_t m) 2027176771Sraj{ 2028176771Sraj pv_entry_t pv; 2029176771Sraj pte_t *pte; 2030176771Sraj 2031224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2032208175Salc ("mmu_booke_remove_write: page %p is not managed", m)); 2033208175Salc 2034208175Salc /* 2035254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2036254138Sattilio * set by another thread while the object is locked. Thus, 2037254138Sattilio * if PGA_WRITEABLE is clear, no page table entries need updating. 2038208175Salc */ 2039248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2040254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2041176771Sraj return; 2042242535Salc rw_wlock(&pvh_global_lock); 2043176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2044176771Sraj PMAP_LOCK(pv->pv_pmap); 2045176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2046176771Sraj if (PTE_ISVALID(pte)) { 2047176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2048176771Sraj 2049187149Sraj mtx_lock_spin(&tlbivax_mutex); 2050192532Sraj tlb_miss_lock(); 2051187149Sraj 2052176771Sraj /* Handle modified pages. */ 2053178626Smarcel if (PTE_ISMODIFIED(pte)) 2054178626Smarcel vm_page_dirty(m); 2055176771Sraj 2056176771Sraj /* Flush mapping from TLB0. */ 2057207437Salc pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2058187149Sraj 2059192532Sraj tlb_miss_unlock(); 2060187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2061176771Sraj } 2062176771Sraj } 2063176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2064176771Sraj } 2065225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 2066242535Salc rw_wunlock(&pvh_global_lock); 2067176771Sraj} 2068176771Sraj 2069198341Smarcelstatic void 2070198341Smarcelmmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2071176771Sraj{ 2072176771Sraj pte_t *pte; 2073198341Smarcel pmap_t pmap; 2074198341Smarcel vm_page_t m; 2075198341Smarcel vm_offset_t addr; 2076198341Smarcel vm_paddr_t pa; 2077198341Smarcel int active, valid; 2078198341Smarcel 2079198341Smarcel va = trunc_page(va); 2080198341Smarcel sz = round_page(sz); 2081176771Sraj 2082242535Salc rw_wlock(&pvh_global_lock); 2083198341Smarcel pmap = PCPU_GET(curpmap); 2084198341Smarcel active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2085198341Smarcel while (sz > 0) { 2086198341Smarcel PMAP_LOCK(pm); 2087198341Smarcel pte = pte_find(mmu, pm, va); 2088198341Smarcel valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2089198341Smarcel if (valid) 2090198341Smarcel pa = PTE_PA(pte); 2091198341Smarcel PMAP_UNLOCK(pm); 2092198341Smarcel if (valid) { 2093198341Smarcel if (!active) { 2094198341Smarcel /* Create a mapping in the active pmap. 
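			 * The target page is not mapped in the current
			 * address space, so a throwaway mapping is entered
			 * at a scratch VA (0 here), the icache is synced
			 * through that alias, and the mapping is torn down
			 * again.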
*/ 2095198341Smarcel addr = 0; 2096198341Smarcel m = PHYS_TO_VM_PAGE(pa); 2097198341Smarcel PMAP_LOCK(pmap); 2098198341Smarcel pte_enter(mmu, pmap, m, addr, 2099270439Skib PTE_SR | PTE_VALID | PTE_UR, FALSE); 2100198341Smarcel __syncicache((void *)addr, PAGE_SIZE); 2101198341Smarcel pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2102198341Smarcel PMAP_UNLOCK(pmap); 2103198341Smarcel } else 2104198341Smarcel __syncicache((void *)va, PAGE_SIZE); 2105198341Smarcel } 2106198341Smarcel va += PAGE_SIZE; 2107198341Smarcel sz -= PAGE_SIZE; 2108176771Sraj } 2109242535Salc rw_wunlock(&pvh_global_lock); 2110176771Sraj} 2111176771Sraj 2112176771Sraj/* 2113176771Sraj * Atomically extract and hold the physical page with the given 2114176771Sraj * pmap and virtual address pair if that mapping permits the given 2115176771Sraj * protection. 2116176771Sraj */ 2117176771Srajstatic vm_page_t 2118176771Srajmmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2119176771Sraj vm_prot_t prot) 2120176771Sraj{ 2121176771Sraj pte_t *pte; 2122176771Sraj vm_page_t m; 2123187151Sraj uint32_t pte_wbit; 2124207410Skmacy vm_paddr_t pa; 2125207410Skmacy 2126176771Sraj m = NULL; 2127207410Skmacy pa = 0; 2128176771Sraj PMAP_LOCK(pmap); 2129207410Skmacyretry: 2130176771Sraj pte = pte_find(mmu, pmap, va); 2131176771Sraj if ((pte != NULL) && PTE_ISVALID(pte)) { 2132176771Sraj if (pmap == kernel_pmap) 2133176771Sraj pte_wbit = PTE_SW; 2134176771Sraj else 2135176771Sraj pte_wbit = PTE_UW; 2136176771Sraj 2137176771Sraj if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2138207410Skmacy if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2139207410Skmacy goto retry; 2140176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2141176771Sraj vm_page_hold(m); 2142176771Sraj } 2143176771Sraj } 2144176771Sraj 2145207410Skmacy PA_UNLOCK_COND(pa); 2146176771Sraj PMAP_UNLOCK(pmap); 2147176771Sraj return (m); 2148176771Sraj} 2149176771Sraj 2150176771Sraj/* 2151176771Sraj * Initialize a vm_page's machine-dependent fields. 2152176771Sraj */ 2153176771Srajstatic void 2154176771Srajmmu_booke_page_init(mmu_t mmu, vm_page_t m) 2155176771Sraj{ 2156176771Sraj 2157176771Sraj TAILQ_INIT(&m->md.pv_list); 2158176771Sraj} 2159176771Sraj 2160176771Sraj/* 2161176771Sraj * mmu_booke_zero_page_area zeros the specified hardware page by 2162176771Sraj * mapping it into virtual memory and using bzero to clear 2163176771Sraj * its contents. 2164176771Sraj * 2165176771Sraj * off and size must reside within a single page. 2166176771Sraj */ 2167176771Srajstatic void 2168176771Srajmmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2169176771Sraj{ 2170176771Sraj vm_offset_t va; 2171176771Sraj 2172187151Sraj /* XXX KASSERT off and size are within a single page? */ 2173176771Sraj 2174176771Sraj mtx_lock(&zero_page_mutex); 2175176771Sraj va = zero_page_va; 2176176771Sraj 2177176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2178176771Sraj bzero((caddr_t)va + off, size); 2179176771Sraj mmu_booke_kremove(mmu, va); 2180176771Sraj 2181176771Sraj mtx_unlock(&zero_page_mutex); 2182176771Sraj} 2183176771Sraj 2184176771Sraj/* 2185176771Sraj * mmu_booke_zero_page zeros the specified hardware page. 
2186176771Sraj */ 2187176771Srajstatic void 2188176771Srajmmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2189176771Sraj{ 2190176771Sraj 2191176771Sraj mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 2192176771Sraj} 2193176771Sraj 2194176771Sraj/* 2195176771Sraj * mmu_booke_copy_page copies the specified (machine independent) page by 2196176771Sraj * mapping the page into virtual memory and using memcopy to copy the page, 2197176771Sraj * one machine dependent page at a time. 2198176771Sraj */ 2199176771Srajstatic void 2200176771Srajmmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2201176771Sraj{ 2202176771Sraj vm_offset_t sva, dva; 2203176771Sraj 2204176771Sraj sva = copy_page_src_va; 2205176771Sraj dva = copy_page_dst_va; 2206176771Sraj 2207187149Sraj mtx_lock(©_page_mutex); 2208176771Sraj mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2209176771Sraj mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2210176771Sraj memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2211176771Sraj mmu_booke_kremove(mmu, dva); 2212176771Sraj mmu_booke_kremove(mmu, sva); 2213176771Sraj mtx_unlock(©_page_mutex); 2214176771Sraj} 2215176771Sraj 2216248280Skibstatic inline void 2217248280Skibmmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 2218248280Skib vm_page_t *mb, vm_offset_t b_offset, int xfersize) 2219248280Skib{ 2220248280Skib void *a_cp, *b_cp; 2221248280Skib vm_offset_t a_pg_offset, b_pg_offset; 2222248280Skib int cnt; 2223248280Skib 2224248280Skib mtx_lock(©_page_mutex); 2225248280Skib while (xfersize > 0) { 2226248280Skib a_pg_offset = a_offset & PAGE_MASK; 2227248280Skib cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2228248280Skib mmu_booke_kenter(mmu, copy_page_src_va, 2229248280Skib VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 2230248280Skib a_cp = (char *)copy_page_src_va + a_pg_offset; 2231248280Skib b_pg_offset = b_offset & PAGE_MASK; 2232248280Skib cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2233248280Skib mmu_booke_kenter(mmu, copy_page_dst_va, 2234248280Skib VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 2235248280Skib b_cp = (char *)copy_page_dst_va + b_pg_offset; 2236248280Skib bcopy(a_cp, b_cp, cnt); 2237248280Skib mmu_booke_kremove(mmu, copy_page_dst_va); 2238248280Skib mmu_booke_kremove(mmu, copy_page_src_va); 2239248280Skib a_offset += cnt; 2240248280Skib b_offset += cnt; 2241248280Skib xfersize -= cnt; 2242248280Skib } 2243248280Skib mtx_unlock(©_page_mutex); 2244248280Skib} 2245248280Skib 2246176771Sraj/* 2247176771Sraj * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2248176771Sraj * into virtual memory and using bzero to clear its contents. This is intended 2249176771Sraj * to be called from the vm_pagezero process only and outside of Giant. No 2250176771Sraj * lock is required. 2251176771Sraj */ 2252176771Srajstatic void 2253176771Srajmmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2254176771Sraj{ 2255176771Sraj vm_offset_t va; 2256176771Sraj 2257176771Sraj va = zero_page_idle_va; 2258176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2259176771Sraj bzero((caddr_t)va, PAGE_SIZE); 2260176771Sraj mmu_booke_kremove(mmu, va); 2261176771Sraj} 2262176771Sraj 2263176771Sraj/* 2264176771Sraj * Return whether or not the specified physical page was modified 2265176771Sraj * in any of physical maps. 
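 *
 * The VM system uses this (through pmap_is_modified()) to decide, for
 * example, whether a page can be freed outright or must first be
 * laundered to its backing store.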
2266176771Sraj */ 2267176771Srajstatic boolean_t 2268176771Srajmmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2269176771Sraj{ 2270176771Sraj pte_t *pte; 2271176771Sraj pv_entry_t pv; 2272208504Salc boolean_t rv; 2273176771Sraj 2274224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2275208504Salc ("mmu_booke_is_modified: page %p is not managed", m)); 2276208504Salc rv = FALSE; 2277176771Sraj 2278208504Salc /* 2279254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2280225418Skib * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2281208504Salc * is clear, no PTEs can be modified. 2282208504Salc */ 2283248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2284254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2285208504Salc return (rv); 2286242535Salc rw_wlock(&pvh_global_lock); 2287176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2288176771Sraj PMAP_LOCK(pv->pv_pmap); 2289208504Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2290208504Salc PTE_ISVALID(pte)) { 2291208504Salc if (PTE_ISMODIFIED(pte)) 2292208504Salc rv = TRUE; 2293176771Sraj } 2294176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2295208504Salc if (rv) 2296208504Salc break; 2297176771Sraj } 2298242535Salc rw_wunlock(&pvh_global_lock); 2299208504Salc return (rv); 2300176771Sraj} 2301176771Sraj 2302176771Sraj/* 2303187151Sraj * Return whether or not the specified virtual address is eligible 2304176771Sraj * for prefault. 2305176771Sraj */ 2306176771Srajstatic boolean_t 2307176771Srajmmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2308176771Sraj{ 2309176771Sraj 2310176771Sraj return (FALSE); 2311176771Sraj} 2312176771Sraj 2313176771Sraj/* 2314207155Salc * Return whether or not the specified physical page was referenced 2315207155Salc * in any physical maps. 2316207155Salc */ 2317207155Salcstatic boolean_t 2318207155Salcmmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2319207155Salc{ 2320207155Salc pte_t *pte; 2321207155Salc pv_entry_t pv; 2322207155Salc boolean_t rv; 2323207155Salc 2324224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2325208574Salc ("mmu_booke_is_referenced: page %p is not managed", m)); 2326207155Salc rv = FALSE; 2327242535Salc rw_wlock(&pvh_global_lock); 2328207155Salc TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2329207155Salc PMAP_LOCK(pv->pv_pmap); 2330207155Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2331208574Salc PTE_ISVALID(pte)) { 2332208574Salc if (PTE_ISREFERENCED(pte)) 2333208574Salc rv = TRUE; 2334208574Salc } 2335207155Salc PMAP_UNLOCK(pv->pv_pmap); 2336207155Salc if (rv) 2337207155Salc break; 2338207155Salc } 2339242535Salc rw_wunlock(&pvh_global_lock); 2340207155Salc return (rv); 2341207155Salc} 2342207155Salc 2343207155Salc/* 2344176771Sraj * Clear the modify bits on the specified physical page. 2345176771Sraj */ 2346176771Srajstatic void 2347176771Srajmmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2348176771Sraj{ 2349176771Sraj pte_t *pte; 2350176771Sraj pv_entry_t pv; 2351176771Sraj 2352224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2353208504Salc ("mmu_booke_clear_modify: page %p is not managed", m)); 2354248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 2355254138Sattilio KASSERT(!vm_page_xbusied(m), 2356254138Sattilio ("mmu_booke_clear_modify: page %p is exclusive busied", m)); 2357208504Salc 2358208504Salc /* 2359225418Skib * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 
2360208504Salc * If the object containing the page is locked and the page is not 2361254138Sattilio * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. 2362208504Salc */ 2363225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 2364176771Sraj return; 2365242535Salc rw_wlock(&pvh_global_lock); 2366176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2367176771Sraj PMAP_LOCK(pv->pv_pmap); 2368208504Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2369208504Salc PTE_ISVALID(pte)) { 2370187149Sraj mtx_lock_spin(&tlbivax_mutex); 2371192532Sraj tlb_miss_lock(); 2372187149Sraj 2373176771Sraj if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2374187149Sraj tlb0_flush_entry(pv->pv_va); 2375176771Sraj pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2376176771Sraj PTE_REFERENCED); 2377176771Sraj } 2378187149Sraj 2379192532Sraj tlb_miss_unlock(); 2380187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2381176771Sraj } 2382176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2383176771Sraj } 2384242535Salc rw_wunlock(&pvh_global_lock); 2385176771Sraj} 2386176771Sraj 2387176771Sraj/* 2388176771Sraj * Return a count of reference bits for a page, clearing those bits. 2389176771Sraj * It is not necessary for every reference bit to be cleared, but it 2390176771Sraj * is necessary that 0 only be returned when there are truly no 2391176771Sraj * reference bits set. 2392176771Sraj * 2393176771Sraj * XXX: The exact number of bits to check and clear is a matter that 2394176771Sraj * should be tested and standardized at some point in the future for 2395176771Sraj * optimal aging of shared pages. 2396176771Sraj */ 2397176771Srajstatic int 2398176771Srajmmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2399176771Sraj{ 2400176771Sraj pte_t *pte; 2401176771Sraj pv_entry_t pv; 2402176771Sraj int count; 2403176771Sraj 2404224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2405208990Salc ("mmu_booke_ts_referenced: page %p is not managed", m)); 2406176771Sraj count = 0; 2407242535Salc rw_wlock(&pvh_global_lock); 2408176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2409176771Sraj PMAP_LOCK(pv->pv_pmap); 2410208990Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2411208990Salc PTE_ISVALID(pte)) { 2412176771Sraj if (PTE_ISREFERENCED(pte)) { 2413187149Sraj mtx_lock_spin(&tlbivax_mutex); 2414192532Sraj tlb_miss_lock(); 2415187149Sraj 2416187149Sraj tlb0_flush_entry(pv->pv_va); 2417176771Sraj pte->flags &= ~PTE_REFERENCED; 2418176771Sraj 2419192532Sraj tlb_miss_unlock(); 2420187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2421187149Sraj 2422176771Sraj if (++count > 4) { 2423176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2424176771Sraj break; 2425176771Sraj } 2426176771Sraj } 2427176771Sraj } 2428176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2429176771Sraj } 2430242535Salc rw_wunlock(&pvh_global_lock); 2431176771Sraj return (count); 2432176771Sraj} 2433176771Sraj 2434176771Sraj/* 2435270920Skib * Clear the wired attribute from the mappings for the specified range of 2436270920Skib * addresses in the given pmap. Every valid mapping within that range must 2437270920Skib * have the wired attribute set. In contrast, invalid mappings cannot have 2438270920Skib * the wired attribute set, so they are ignored. 2439270920Skib * 2440270920Skib * The wired attribute of the page table entry is not a hardware feature, so 2441270920Skib * there is no need to invalidate any TLB entries. 
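 *
 * Per page, the effect reduces to clearing a software-only flag, as a
 * sketch of the loop body below shows:
 *
 *	pte->flags &= ~PTE_WIRED;
 *	pmap->pm_stats.wired_count--;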
2442176771Sraj */ 2443176771Srajstatic void 2444270920Skibmmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2445176771Sraj{ 2446270920Skib vm_offset_t va; 2447201758Smbr pte_t *pte; 2448176771Sraj 2449176771Sraj PMAP_LOCK(pmap); 2450270920Skib for (va = sva; va < eva; va += PAGE_SIZE) { 2451270920Skib if ((pte = pte_find(mmu, pmap, va)) != NULL && 2452270920Skib PTE_ISVALID(pte)) { 2453270920Skib if (!PTE_ISWIRED(pte)) 2454270920Skib panic("mmu_booke_unwire: pte %p isn't wired", 2455270920Skib pte); 2456270920Skib pte->flags &= ~PTE_WIRED; 2457270920Skib pmap->pm_stats.wired_count--; 2458176771Sraj } 2459176771Sraj } 2460176771Sraj PMAP_UNLOCK(pmap); 2461270920Skib 2462176771Sraj} 2463176771Sraj 2464176771Sraj/* 2465176771Sraj * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2466176771Sraj * page. This count may be changed upwards or downwards in the future; it is 2467176771Sraj * only necessary that true be returned for a small subset of pmaps for proper 2468176771Sraj * page aging. 2469176771Sraj */ 2470176771Srajstatic boolean_t 2471176771Srajmmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2472176771Sraj{ 2473176771Sraj pv_entry_t pv; 2474176771Sraj int loops; 2475208990Salc boolean_t rv; 2476176771Sraj 2477224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2478208990Salc ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2479176771Sraj loops = 0; 2480208990Salc rv = FALSE; 2481242535Salc rw_wlock(&pvh_global_lock); 2482176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2483208990Salc if (pv->pv_pmap == pmap) { 2484208990Salc rv = TRUE; 2485208990Salc break; 2486208990Salc } 2487176771Sraj if (++loops >= 16) 2488176771Sraj break; 2489176771Sraj } 2490242535Salc rw_wunlock(&pvh_global_lock); 2491208990Salc return (rv); 2492176771Sraj} 2493176771Sraj 2494176771Sraj/* 2495176771Sraj * Return the number of managed mappings to the given physical page that are 2496176771Sraj * wired. 2497176771Sraj */ 2498176771Srajstatic int 2499176771Srajmmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2500176771Sraj{ 2501176771Sraj pv_entry_t pv; 2502176771Sraj pte_t *pte; 2503176771Sraj int count = 0; 2504176771Sraj 2505224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 2506176771Sraj return (count); 2507242535Salc rw_wlock(&pvh_global_lock); 2508176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2509176771Sraj PMAP_LOCK(pv->pv_pmap); 2510176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2511176771Sraj if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2512176771Sraj count++; 2513176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2514176771Sraj } 2515242535Salc rw_wunlock(&pvh_global_lock); 2516176771Sraj return (count); 2517176771Sraj} 2518176771Sraj 2519176771Srajstatic int 2520235936Srajmmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2521176771Sraj{ 2522176771Sraj int i; 2523176771Sraj vm_offset_t va; 2524176771Sraj 2525176771Sraj /* 2526176771Sraj * This currently does not work for entries that 2527176771Sraj * overlap TLB1 entries. 
2528176771Sraj */ 2529176771Sraj for (i = 0; i < tlb1_idx; i ++) { 2530176771Sraj if (tlb1_iomapped(i, pa, size, &va) == 0) 2531176771Sraj return (0); 2532176771Sraj } 2533176771Sraj 2534176771Sraj return (EFAULT); 2535176771Sraj} 2536176771Sraj 2537190701Smarcelvm_offset_t 2538190701Smarcelmmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2539190701Smarcel vm_size_t *sz) 2540190701Smarcel{ 2541190701Smarcel vm_paddr_t pa, ppa; 2542190701Smarcel vm_offset_t va; 2543190701Smarcel vm_size_t gran; 2544190701Smarcel 2545190701Smarcel /* Raw physical memory dumps don't have a virtual address. */ 2546190701Smarcel if (md->md_vaddr == ~0UL) { 2547190701Smarcel /* We always map a 256MB page at 256M. */ 2548190701Smarcel gran = 256 * 1024 * 1024; 2549190701Smarcel pa = md->md_paddr + ofs; 2550190701Smarcel ppa = pa & ~(gran - 1); 2551190701Smarcel ofs = pa - ppa; 2552190701Smarcel va = gran; 2553190701Smarcel tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2554190701Smarcel if (*sz > (gran - ofs)) 2555190701Smarcel *sz = gran - ofs; 2556190701Smarcel return (va + ofs); 2557190701Smarcel } 2558190701Smarcel 2559190701Smarcel /* Minidumps are based on virtual memory addresses. */ 2560190701Smarcel va = md->md_vaddr + ofs; 2561190701Smarcel if (va >= kernstart + kernsize) { 2562190701Smarcel gran = PAGE_SIZE - (va & PAGE_MASK); 2563190701Smarcel if (*sz > gran) 2564190701Smarcel *sz = gran; 2565190701Smarcel } 2566190701Smarcel return (va); 2567190701Smarcel} 2568190701Smarcel 2569190701Smarcelvoid 2570190701Smarcelmmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2571190701Smarcel vm_offset_t va) 2572190701Smarcel{ 2573190701Smarcel 2574190701Smarcel /* Raw physical memory dumps don't have a virtual address. */ 2575190701Smarcel if (md->md_vaddr == ~0UL) { 2576190701Smarcel tlb1_idx--; 2577190701Smarcel tlb1[tlb1_idx].mas1 = 0; 2578190701Smarcel tlb1[tlb1_idx].mas2 = 0; 2579190701Smarcel tlb1[tlb1_idx].mas3 = 0; 2580190701Smarcel tlb1_write_entry(tlb1_idx); 2581190701Smarcel return; 2582190701Smarcel } 2583190701Smarcel 2584190701Smarcel /* Minidumps are based on virtual memory addresses. */ 2585190701Smarcel /* Nothing to do... */ 2586190701Smarcel} 2587190701Smarcel 2588190701Smarcelstruct pmap_md * 2589190701Smarcelmmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2590190701Smarcel{ 2591190701Smarcel static struct pmap_md md; 2592190701Smarcel pte_t *pte; 2593190701Smarcel vm_offset_t va; 2594190701Smarcel 2595190701Smarcel if (dumpsys_minidump) { 2596190701Smarcel md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2597190701Smarcel if (prev == NULL) { 2598190701Smarcel /* 1st: kernel .data and .bss. */ 2599190701Smarcel md.md_index = 1; 2600190701Smarcel md.md_vaddr = trunc_page((uintptr_t)_etext); 2601190701Smarcel md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2602190701Smarcel return (&md); 2603190701Smarcel } 2604190701Smarcel switch (prev->md_index) { 2605190701Smarcel case 1: 2606190701Smarcel /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2607190701Smarcel md.md_index = 2; 2608190701Smarcel md.md_vaddr = data_start; 2609190701Smarcel md.md_size = data_end - data_start; 2610190701Smarcel break; 2611190701Smarcel case 2: 2612190701Smarcel /* 3rd: kernel VM. */ 2613190701Smarcel va = prev->md_vaddr + prev->md_size; 2614190701Smarcel /* Find start of next chunk (from va). */ 2615190701Smarcel while (va < virtual_end) { 2616190701Smarcel /* Don't dump the buffer cache. 
				 */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pte = pte_find(mmu, kernel_pmap, va);
				if (pte != NULL && PTE_ISVALID(pte))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* Full memory dumps. */
		mem_regions(&physmem_regions, &physmem_regions_sz,
		    &availmem_regions, &availmem_regions_sz);

		if (prev == NULL) {
			/* First physical chunk. */
			md.md_paddr = physmem_regions[0].mr_start;
			md.md_size = physmem_regions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < physmem_regions_sz) {
			md.md_paddr = physmem_regions[md.md_index].mr_start;
			md.md_size = physmem_regions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

static void *
mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;
	int i;

	/*
	 * Check if this is premapped in TLB1. Note: this should probably also
	 * check whether a sequence of TLB1 entries exists that matches the
	 * requirement, but for now only the easy case is checked.
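	 *
	 * The easy case is full containment within a single entry:
	 *
	 *	tlb1[i].phys <= pa &&
	 *	pa + size <= tlb1[i].phys + tlb1[i].size
	 *
	 * in which case the existing virtual alias is returned as is.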
	 */
	if (ma == VM_MEMATTR_DEFAULT) {
		for (i = 0; i < tlb1_idx; i++) {
			if (!(tlb1[i].mas1 & MAS1_VALID))
				continue;
			if (pa >= tlb1[i].phys &&
			    (pa + size) <= (tlb1[i].phys + tlb1[i].size))
				return (void *)(tlb1[i].virt +
				    (pa - tlb1[i].phys));
		}
	}

	size = roundup(size, PAGE_SIZE);

	/*
	 * We leave a hole for device direct mapping between the maximum user
	 * address (0x8000000) and the minimum KVA address (0xc0000000). If
	 * devices are in there, just map them 1:1. If not, map them to the
	 * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
	 * addresses should be pulled from an allocator, but since we do not
	 * ever free TLB1 entries, it is safe just to increment a counter.
	 * Note that there isn't a lot of address space here (128 MB) and it
	 * is not at all difficult to imagine running out, since that is a 4:1
	 * compression from the 0xc0000000 - 0xf0000000 address space that gets
	 * mapped there.
	 */
	if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
	    (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
		va = pa;
	else
		va = atomic_fetchadd_int(&tlb1_map_base, size);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
#ifdef SUPPORTS_SHRINKING_TLB1
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kva_free(base, size);
	}
#endif
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
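 *
 * On this port it is effectively a stub: only device- and SG-backed
 * objects are asserted, and no PTEs are actually preloaded.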
2768176771Sraj */ 2769176771Srajstatic void 2770176771Srajmmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2771176771Sraj vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2772176771Sraj{ 2773187151Sraj 2774248084Sattilio VM_OBJECT_ASSERT_WLOCKED(object); 2775195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2776176771Sraj ("mmu_booke_object_init_pt: non-device object")); 2777176771Sraj} 2778176771Sraj 2779176771Sraj/* 2780176771Sraj * Perform the pmap work for mincore. 2781176771Sraj */ 2782176771Srajstatic int 2783208504Salcmmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2784208504Salc vm_paddr_t *locked_pa) 2785176771Sraj{ 2786176771Sraj 2787266000Sian /* XXX: this should be implemented at some point */ 2788176771Sraj return (0); 2789176771Sraj} 2790176771Sraj 2791176771Sraj/**************************************************************************/ 2792176771Sraj/* TID handling */ 2793176771Sraj/**************************************************************************/ 2794176771Sraj 2795176771Sraj/* 2796176771Sraj * Allocate a TID. If necessary, steal one from someone else. 2797176771Sraj * The new TID is flushed from the TLB before returning. 2798176771Sraj */ 2799176771Srajstatic tlbtid_t 2800176771Srajtid_alloc(pmap_t pmap) 2801176771Sraj{ 2802176771Sraj tlbtid_t tid; 2803187149Sraj int thiscpu; 2804176771Sraj 2805187149Sraj KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2806176771Sraj 2807187149Sraj CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2808176771Sraj 2809187149Sraj thiscpu = PCPU_GET(cpuid); 2810176771Sraj 2811187149Sraj tid = PCPU_GET(tid_next); 2812187149Sraj if (tid > TID_MAX) 2813187149Sraj tid = TID_MIN; 2814187149Sraj PCPU_SET(tid_next, tid + 1); 2815176771Sraj 2816187149Sraj /* If we are stealing TID then clear the relevant pmap's field */ 2817187149Sraj if (tidbusy[thiscpu][tid] != NULL) { 2818176771Sraj 2819187149Sraj CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2820187149Sraj 2821187149Sraj tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2822176771Sraj 2823187149Sraj /* Flush all entries from TLB0 matching this TID. */ 2824187149Sraj tid_flush(tid); 2825176771Sraj } 2826176771Sraj 2827187149Sraj tidbusy[thiscpu][tid] = pmap; 2828187149Sraj pmap->pm_tid[thiscpu] = tid; 2829187149Sraj __asm __volatile("msync; isync"); 2830176771Sraj 2831187149Sraj CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2832187149Sraj PCPU_GET(tid_next)); 2833176771Sraj 2834176771Sraj return (tid); 2835176771Sraj} 2836176771Sraj 2837176771Sraj/**************************************************************************/ 2838176771Sraj/* TLB0 handling */ 2839176771Sraj/**************************************************************************/ 2840176771Sraj 2841176771Srajstatic void 2842187149Srajtlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2843187149Sraj uint32_t mas7) 2844176771Sraj{ 2845176771Sraj int as; 2846176771Sraj char desc[3]; 2847176771Sraj tlbtid_t tid; 2848176771Sraj vm_size_t size; 2849176771Sraj unsigned int tsize; 2850176771Sraj 2851176771Sraj desc[2] = '\0'; 2852176771Sraj if (mas1 & MAS1_VALID) 2853176771Sraj desc[0] = 'V'; 2854176771Sraj else 2855176771Sraj desc[0] = ' '; 2856176771Sraj 2857176771Sraj if (mas1 & MAS1_IPROT) 2858176771Sraj desc[1] = 'P'; 2859176771Sraj else 2860176771Sraj desc[1] = ' '; 2861176771Sraj 2862187149Sraj as = (mas1 & MAS1_TS_MASK) ? 
1 : 0; 2863176771Sraj tid = MAS1_GETTID(mas1); 2864176771Sraj 2865176771Sraj tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2866176771Sraj size = 0; 2867176771Sraj if (tsize) 2868176771Sraj size = tsize2size(tsize); 2869176771Sraj 2870176771Sraj debugf("%3d: (%s) [AS=%d] " 2871176771Sraj "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2872176771Sraj "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2873176771Sraj i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2874176771Sraj} 2875176771Sraj 2876176771Sraj/* Convert TLB0 va and way number to tlb0[] table index. */ 2877176771Srajstatic inline unsigned int 2878176771Srajtlb0_tableidx(vm_offset_t va, unsigned int way) 2879176771Sraj{ 2880176771Sraj unsigned int idx; 2881176771Sraj 2882176771Sraj idx = (way * TLB0_ENTRIES_PER_WAY); 2883176771Sraj idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2884176771Sraj return (idx); 2885176771Sraj} 2886176771Sraj 2887176771Sraj/* 2888187149Sraj * Invalidate TLB0 entry. 2889176771Sraj */ 2890187149Srajstatic inline void 2891187149Srajtlb0_flush_entry(vm_offset_t va) 2892176771Sraj{ 2893176771Sraj 2894187149Sraj CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2895176771Sraj 2896187149Sraj mtx_assert(&tlbivax_mutex, MA_OWNED); 2897176771Sraj 2898187149Sraj __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2899187149Sraj __asm __volatile("isync; msync"); 2900187149Sraj __asm __volatile("tlbsync; msync"); 2901176771Sraj 2902187149Sraj CTR1(KTR_PMAP, "%s: e", __func__); 2903176771Sraj} 2904176771Sraj 2905176771Sraj/* Print out contents of the MAS registers for each TLB0 entry */ 2906187149Srajvoid 2907176771Srajtlb0_print_tlbentries(void) 2908176771Sraj{ 2909187149Sraj uint32_t mas0, mas1, mas2, mas3, mas7; 2910176771Sraj int entryidx, way, idx; 2911176771Sraj 2912176771Sraj debugf("TLB0 entries:\n"); 2913187149Sraj for (way = 0; way < TLB0_WAYS; way ++) 2914176771Sraj for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2915176771Sraj 2916176771Sraj mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2917176771Sraj mtspr(SPR_MAS0, mas0); 2918187149Sraj __asm __volatile("isync"); 2919176771Sraj 2920176771Sraj mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2921176771Sraj mtspr(SPR_MAS2, mas2); 2922176771Sraj 2923187149Sraj __asm __volatile("isync; tlbre"); 2924176771Sraj 2925176771Sraj mas1 = mfspr(SPR_MAS1); 2926176771Sraj mas2 = mfspr(SPR_MAS2); 2927176771Sraj mas3 = mfspr(SPR_MAS3); 2928176771Sraj mas7 = mfspr(SPR_MAS7); 2929176771Sraj 2930176771Sraj idx = tlb0_tableidx(mas2, way); 2931176771Sraj tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2932176771Sraj } 2933176771Sraj} 2934176771Sraj 2935176771Sraj/**************************************************************************/ 2936176771Sraj/* TLB1 handling */ 2937176771Sraj/**************************************************************************/ 2938187149Sraj 2939176771Sraj/* 2940187149Sraj * TLB1 mapping notes: 2941187149Sraj * 2942265996Sian * TLB1[0] Kernel text and data. 2943265996Sian * TLB1[1-15] Additional kernel text and data mappings (if required), PCI 2944187149Sraj * windows, other devices mappings. 2945187149Sraj */ 2946187149Sraj 2947187149Sraj/* 2948176771Sraj * Write given entry to TLB1 hardware. 2949176771Sraj * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 
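 *
 * For reference, MAS1.TSIZE encodes size = 4^TSIZE KB (see
 * tsize2size() below); a few worked values:
 *
 *	TSIZE 1 ->   4 KB	TSIZE 5 ->   1 MB
 *	TSIZE 7 ->  16 MB	TSIZE 9 -> 256 MB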

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0]	Kernel text and data.
 * TLB1[1-15]	Additional kernel text and data mappings (if required),
 *		PCI windows, other device mappings.
 */

/*
 * Write the given entry to the TLB1 hardware.
 * A 32 bit pa is used, so the 4 high-order bits of the RPN (MAS7) are
 * cleared.
 */
static void
tlb1_write_entry(unsigned int idx)
{
	uint32_t mas0, mas7;

	//debugf("tlb1_write_entry: s\n");

	/* Clear high order RPN bits */
	mas7 = 0;

	/* Select entry */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync");
	mtspr(SPR_MAS1, tlb1[idx].mas1);
	__asm __volatile("isync");
	mtspr(SPR_MAS2, tlb1[idx].mas2);
	__asm __volatile("isync");
	mtspr(SPR_MAS3, tlb1[idx].mas3);
	__asm __volatile("isync");
	mtspr(SPR_MAS7, mas7);
	__asm __volatile("isync; tlbwe; isync; msync");

	//debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
	int lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

	/*
	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
	 */

	return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be a power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

	return (ilog2(size) / 2 - 5);
}
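
/*
 * Illustrative sketch (standalone, excluded from the build): the TSIZE
 * encoding used above, size = 4^tsize KB = 2^(2 * tsize + 10) bytes, and
 * the round trip through the inverse. The ex_* functions are hypothetical
 * stand-ins for tsize2size()/size2tsize(); a portable loop replaces the
 * cntlzw-based ilog2().
 */
#if 0
#include <assert.h>
#include <stdio.h>

static unsigned int
ex_tsize2size(unsigned int tsize)
{

	return ((1 << (2 * tsize)) * 1024);
}

static unsigned int
ex_size2tsize(unsigned int size)
{
	unsigned int log;

	for (log = 0; (1u << (log + 1)) <= size; log++)
		;			/* portable ilog2() */
	return (log / 2 - 5);
}

int
main(void)
{
	unsigned int t;

	/* TSIZE 1..9: 4 KB, 16 KB, 64 KB, ..., 256 MB. */
	for (t = 1; t <= 9; t++) {
		printf("tsize %u -> %u KB\n", t, ex_tsize2size(t) / 1024);
		assert(ex_size2tsize(ex_tsize2size(t)) == t);
	}
	return (0);
}
#endif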

/*
 * Register a permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (the current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
 */
static int
tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
    uint32_t flags)
{
	uint32_t ts, tid;
	int tsize, index;

	index = atomic_fetchadd_int(&tlb1_idx, 1);
	if (index >= TLB1_ENTRIES) {
		printf("tlb1_set_entry: TLB1 full!\n");
		return (-1);
	}

	/* Convert size to TSIZE */
	tsize = size2tsize(size);

	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	/* XXX TS is hard coded to 0 for now as we only use a single address space. */
	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

	/*
	 * Atomicity is preserved by the atomic increment above since nothing
	 * is ever removed from tlb1.
	 */

	tlb1[index].phys = pa;
	tlb1[index].virt = va;
	tlb1[index].size = size;
	tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
	tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
	tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;

	/* Set supervisor RWX permission bits */
	tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;

	tlb1_write_entry(index);

	/*
	 * XXX In general, TLB1 updates should be propagated between CPUs,
	 * since the current design assumes the same TLB1 set-up on all
	 * cores.
	 */
	return (0);
}

/*
 * Map a contiguous RAM region into TLB1 using at most
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary, round up the size of the last entry, and return the total
 * size used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
	vm_size_t mapped, pgsz, base, mask;
	int idx, nents;

	/* Round up to the next 1M */
	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);

	mapped = 0;
	idx = 0;
	base = va;
	pgsz = 64*1024*1024;
	while (mapped < size) {
		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
			while (pgsz > (size - mapped))
				pgsz >>= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}

		/* We under-map. Correct for this. */
		if (mapped < size) {
			while (pgs[idx - 1] == pgsz) {
				idx--;
				mapped -= pgsz;
			}
			/* XXX We may increase beyond our starting point. */
			pgsz <<= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
	}

	nents = idx;
	mask = pgs[0] - 1;
	/* Align address to the boundary */
	if (va & mask) {
		va = (va + mask) & ~mask;
		pa = (pa + mask) & ~mask;
	}

	for (idx = 0; idx < nents; idx++) {
		pgsz = pgs[idx];
		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
		pa += pgsz;
		va += pgsz;
	}

	mapped = (va - base);
	printf("mapped size 0x%08x (wasted space 0x%08x)\n",
	    mapped, mapped - size);
	return (mapped);
}
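
/*
 * Illustrative sketch (standalone, excluded from the build): the greedy
 * page-size selection performed by tlb1_mapin_region() above. Entry sizes
 * are powers of 4, starting from 64 MB and shrinking to fit; if the entry
 * budget runs out, the tail is re-coalesced into the next larger size,
 * possibly over-mapping. EX_MAX_ENTRIES is a hypothetical stand-in for
 * KERNEL_REGION_MAX_TLB_ENTRIES.
 */
#if 0
#include <stdio.h>

#define EX_MAX_ENTRIES	2

int
main(void)
{
	unsigned int pgs[EX_MAX_ENTRIES];
	unsigned int size, mapped, pgsz;
	int idx, i;

	size = 112 << 20;		/* 112 MB, already 1 MB aligned */
	mapped = 0;
	idx = 0;
	pgsz = 64 << 20;
	while (mapped < size) {
		while (mapped < size && idx < EX_MAX_ENTRIES) {
			while (pgsz > size - mapped)
				pgsz >>= 2;	/* next smaller power of 4 */
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
		if (mapped < size) {
			/* Out of entries: coalesce the tail upwards. */
			while (pgs[idx - 1] == pgsz) {
				idx--;
				mapped -= pgsz;
			}
			pgsz <<= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
	}

	/* With 2 entries, 112 MB becomes 64 + 64 MB (16 MB over-mapped). */
	for (i = 0; i < idx; i++)
		printf("entry %d: %u MB\n", i, pgs[i] >> 20);
	return (0);
}
#endif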

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(void)
{
	uint32_t mas0, mas1, mas2, mas3;
	uint32_t tsz;
	u_int i;

	if (bootinfo != NULL && bootinfo[0] != 1)
		tlb1_idx = *((uint16_t *)(bootinfo + 8));
	else
		tlb1_idx = 1;

	/* The first entry/entries are used to map the kernel. */
	for (i = 0; i < tlb1_idx; i++) {
		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);
		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		if ((mas1 & MAS1_VALID) == 0)
			continue;

		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);

		tlb1[i].mas1 = mas1;
		tlb1[i].mas2 = mas2;
		tlb1[i].mas3 = mas3;
		tlb1[i].virt = mas2 & MAS2_EPN_MASK;
		tlb1[i].phys = mas3 & MAS3_RPN;

		if (i == 0)
			kernload = mas3 & MAS3_RPN;

		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
		tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;
		kernsize += tlb1[i].size;
	}

#ifdef SMP
	bp_ntlb1s = tlb1_idx;
#endif

	/* Purge the remaining entries */
	for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
		tlb1_write_entry(i);

	/* Setup TLB miss defaults */
	set_mas4_defaults();
}

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	vm_paddr_t pa_base;
	vm_offset_t va, sz;
	int i;

	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));

	/* Reuse an existing TLB1 entry if it already covers the range. */
	for (i = 0; i < tlb1_idx; i++) {
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;
		if (pa >= tlb1[i].phys && (pa + size) <=
		    (tlb1[i].phys + tlb1[i].size))
			return (tlb1[i].virt + (pa - tlb1[i].phys));
	}

	pa_base = trunc_page(pa);
	size = roundup(size + (pa - pa_base), PAGE_SIZE);
	tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
	va = tlb1_map_base + (pa - pa_base);

	do {
		sz = 1 << (ilog2(size) & ~1);
		tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa_base += sz;
		tlb1_map_base += sz;
	} while (size > 0);

#ifdef SMP
	bp_ntlb1s = tlb1_idx;
#endif

	return (va);
}
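
/*
 * Illustrative sketch (standalone, excluded from the build): the
 * power-of-4 chunking used by pmap_early_io_map() above. Each pass maps
 * the largest power-of-4 block that fits the remaining size, i.e.
 * 1 << (ilog2(size) & ~1), which is always a valid TLB1 entry size.
 * ex_ilog2() is a portable stand-in for the cntlzw-based ilog2().
 */
#if 0
#include <stdio.h>

static unsigned int
ex_ilog2(unsigned int num)
{
	unsigned int log;

	for (log = 0; (1u << (log + 1)) <= num; log++)
		;
	return (log);
}

int
main(void)
{
	unsigned int size, sz;

	size = 192 * 1024;	/* 192 KB of device registers, page aligned */
	while (size > 0) {
		sz = 1u << (ex_ilog2(size) & ~1u);	/* power of 4 <= size */
		printf("map %u KB\n", sz / 1024);	/* 64, 64, 64 */
		size -= sz;
	}
	return (0);
}
#endif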

/*
 * Set up MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}

/*
 * Return 0 if the physical IO range is encompassed by one of the TLB1
 * entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	/* pa_end is inclusive; a range ending on the last byte still fits. */
	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}
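
/*
 * Illustrative sketch (standalone, excluded from the build): the range
 * check and pa -> va translation performed by tlb1_iomapped() above,
 * with hypothetical values for a 1 MB cache-inhibited device window.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t pa_start = 0xfef00000;	/* entry physical base */
	uint32_t va_start = 0xfef00000;	/* entry virtual base (1:1 here) */
	uint32_t entry_size = 1 << 20;	/* 1 MB window, TSIZE = 5 */
	uint32_t pa_end = pa_start + entry_size - 1;
	uint32_t pa = 0xfef40000, size = 0x1000;

	if (pa < pa_start || pa + size - 1 > pa_end)
		printf("not covered\n");
	else
		printf("va = 0x%08x\n", va_start + (pa - pa_start));
	return (0);
}
#endif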