mmu_oea64.c: stable/10 r270439 -> r270920

This revision retires the per-page mmu_change_wiring method in favor of a
ranged mmu_unwire method, and teaches moea64_pvo_enter() to update the wired
attribute of an already-established mapping in place.
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
  *
  * Redistribution and use in source and binary forms, with or without

--- 70 unchanged lines hidden ---

  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/mmu_oea64.c 270439 2014-08-24 07:53:15Z kib $");
+__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/mmu_oea64.c 270920 2014-09-01 07:58:15Z kib $");

 /*
  * Manages physical address maps.
  *
  * Since the information managed by this module is also stored by the
  * logical address mapping module, this module may throw away valid virtual
  * to physical mappings at almost any time. However, invalidations of
  * mappings must be done as requested.

--- 182 unchanged lines hidden ---

 static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
 static void moea64_kremove(mmu_t, vm_offset_t);
 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
     vm_offset_t pa, vm_size_t sz);

 /*
  * Kernel MMU interface
  */
-void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
 void moea64_clear_modify(mmu_t, vm_page_t);
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
     u_int flags, int8_t psind);
 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);

--- 13 unchanged lines hidden ---

 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
 void moea64_qremove(mmu_t, vm_offset_t, int);
 void moea64_release(mmu_t, pmap_t);
 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_remove_pages(mmu_t, pmap_t);
 void moea64_remove_all(mmu_t, vm_page_t);
 void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
 void moea64_zero_page_idle(mmu_t, vm_page_t);
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
     vm_size_t *sz);
 struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);

 static mmu_method_t moea64_methods[] = {
-	MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
 	MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
 	MMUMETHOD(mmu_copy_page, moea64_copy_page),
 	MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
 	MMUMETHOD(mmu_enter, moea64_enter),
 	MMUMETHOD(mmu_enter_object, moea64_enter_object),
 	MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
 	MMUMETHOD(mmu_extract, moea64_extract),
 	MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),

--- 11 unchanged lines hidden ---

 	MMUMETHOD(mmu_qenter, moea64_qenter),
 	MMUMETHOD(mmu_qremove, moea64_qremove),
 	MMUMETHOD(mmu_release, moea64_release),
 	MMUMETHOD(mmu_remove, moea64_remove),
 	MMUMETHOD(mmu_remove_pages, moea64_remove_pages),
 	MMUMETHOD(mmu_remove_all, moea64_remove_all),
 	MMUMETHOD(mmu_remove_write, moea64_remove_write),
 	MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
+	MMUMETHOD(mmu_unwire, moea64_unwire),
 	MMUMETHOD(mmu_zero_page, moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
 	MMUMETHOD(mmu_activate, moea64_activate),
 	MMUMETHOD(mmu_deactivate, moea64_deactivate),
 	MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),

 	/* Internal interfaces */
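
The moea64_methods[] table is how this MD pmap registers its entry points
with the machine-independent layer, so retiring mmu_change_wiring and adding
mmu_unwire here is what actually switches the dispatched KPI. The userspace
sketch below mimics that function-pointer-table shape; it is not the kernel's
kobj(9) machinery, and every name in it is invented for illustration.

#include <stdio.h>

/* One slot per overridable operation (a tiny stand-in for mmu_method_t). */
struct mmu_ops {
	const char *name;
	void (*unwire)(unsigned long sva, unsigned long eva);
};

/* MD implementation, analogous to moea64_unwire(). */
static void
demo_unwire(unsigned long sva, unsigned long eva)
{
	printf("unwiring [%#lx, %#lx)\n", sva, eva);
}

/* Registration, analogous to the moea64_methods[] initializer above. */
static const struct mmu_ops demo_ops = {
	.name = "demo_mmu",
	.unwire = demo_unwire,
};

int
main(void)
{
	/* MI code only ever calls through the installed table. */
	if (demo_ops.unwire != NULL)
		demo_ops.unwire(0x2000, 0x8000);
	return (0);
}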

--- 649 unchanged lines hidden ---

 #ifdef __powerpc64__
 	PCPU_SET(userslb, NULL);
 #else
 	PCPU_SET(curpmap, NULL);
 #endif
 }

 void
-moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
+moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
-	struct pvo_entry *pvo;
+	struct pvo_entry key, *pvo;
 	uintptr_t pt;
-	uint64_t vsid;
-	int i, ptegidx;

-	LOCK_TABLE_WR();
+	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
-	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
-
-	if (pvo != NULL) {
+	key.pvo_vaddr = sva;
+	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+	    pvo != NULL && PVO_VADDR(pvo) < eva;
+	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
 		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
-		if (wired) {
-			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
-				pm->pm_stats.wired_count++;
-			pvo->pvo_vaddr |= PVO_WIRED;
-			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
-		} else {
-			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
-				pm->pm_stats.wired_count--;
-			pvo->pvo_vaddr &= ~PVO_WIRED;
-			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
-		}
-
+		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
+			    pvo);
+		pvo->pvo_vaddr &= ~PVO_WIRED;
+		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
+			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
+			    &pvo->pvo_pte.lpte);
+		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
 		if (pt != -1) {
-			/* Update wiring flag in page table. */
-			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-			    pvo->pvo_vpn);
-		} else if (wired) {
 			/*
-			 * If we are wiring the page, and it wasn't in the
-			 * page table before, add it.
+			 * The PTE's wired attribute is not a hardware
+			 * feature, so there is no need to invalidate any TLB
+			 * entries.
 			 */
-			vsid = PVO_VSID(pvo);
-			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
-			    pvo->pvo_vaddr & PVO_LARGE);
-
-			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
-
-			if (i >= 0) {
-				PVO_PTEGIDX_CLR(pvo);
-				PVO_PTEGIDX_SET(pvo, i);
-			}
+			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+			    pvo->pvo_vpn);
 		}
-
+		pm->pm_stats.wired_count--;
 	}
-	UNLOCK_TABLE_WR();
+	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 }

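
The heart of the new moea64_unwire() is the replacement of a single
moea64_pvo_find_va() lookup with an ordered walk of the pmap's PVO
red-black tree over [sva, eva), using RB_NFIND() to locate the first entry
at or above sva and RB_NEXT() to step through the rest. The self-contained
userspace sketch below shows just that tree(3) idiom; it assumes a BSD-style
<sys/tree.h> (on Linux, libbsd provides one), and struct ent and its fields
are invented for the example.

#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct ent {
	RB_ENTRY(ent) link;
	unsigned long va;		/* key: virtual address */
};

static int
ent_cmp(struct ent *a, struct ent *b)
{
	return ((a->va < b->va) ? -1 : (a->va > b->va));
}

static RB_HEAD(ent_tree, ent) head = RB_INITIALIZER(&head);
RB_GENERATE(ent_tree, ent, link, ent_cmp)

int
main(void)
{
	unsigned long vas[] = { 0x1000, 0x3000, 0x5000, 0x9000 };
	unsigned long sva = 0x2000, eva = 0x8000;
	struct ent *e, key;

	for (size_t i = 0; i < sizeof(vas) / sizeof(vas[0]); i++) {
		e = malloc(sizeof(*e));
		e->va = vas[i];
		RB_INSERT(ent_tree, &head, e);
	}

	/*
	 * Visit every entry in [sva, eva): RB_NFIND() returns the first
	 * node whose key is >= sva, and RB_NEXT() yields the rest in
	 * order. This is the same loop shape as the new moea64_unwire().
	 */
	key.va = sva;
	for (e = RB_NFIND(ent_tree, &head, &key);
	    e != NULL && e->va < eva;
	    e = RB_NEXT(ent_tree, &head, e))
		printf("visiting 0x%lx\n", e->va);

	return (0);
}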
 /*
  * This goes through and sets the physical address of our
  * special scratch PTE to the PA we want to zero or copy. Because
  * of locking issues (this can get called in pvo_enter() by
  * the UMA allocator), we can't use most other utility functions here

--- 1117 unchanged lines hidden ---

 }

 static int
 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
     struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
     uint64_t pte_lo, int flags, int8_t psind __unused)
 {
 	struct pvo_entry *pvo;
+	uintptr_t pt;
 	uint64_t vsid;
 	int first;
 	u_int ptegidx;
 	int i;
 	int bootstrap;

 	/*
 	 * One nasty thing that can happen here is that the UMA calls to

--- 26 unchanged lines hidden ---

 	 */
 	moea64_pvo_enter_calls++;

 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
 			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
 			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
+				/*
+				 * The physical page and protection are not
+				 * changing. Instead, this may be a request
+				 * to change the mapping's wired attribute.
+				 */
+				pt = -1;
+				if ((flags & PVO_WIRED) != 0 &&
+				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
+					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+					pvo->pvo_vaddr |= PVO_WIRED;
+					pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
+					pm->pm_stats.wired_count++;
+				} else if ((flags & PVO_WIRED) == 0 &&
+				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
+					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+					pvo->pvo_vaddr &= ~PVO_WIRED;
+					pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
+					pm->pm_stats.wired_count--;
+				}
 				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
+					KASSERT(pt == -1,
+					    ("moea64_pvo_enter: valid pt"));
 					/* Re-insert if spilled */
 					i = MOEA64_PTE_INSERT(mmu, ptegidx,
 					    &pvo->pvo_pte.lpte);
 					if (i >= 0)
 						PVO_PTEGIDX_SET(pvo, i);
 					moea64_pte_overflow--;
+				} else if (pt != -1) {
+					/*
+					 * The PTE's wired attribute is not a
+					 * hardware feature, so there is no
+					 * need to invalidate any TLB entries.
+					 */
+					MOEA64_PTE_CHANGE(mmu, pt,
+					    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
 				}
 				return (0);
 			}
 			moea64_pvo_remove(mmu, pvo);
 			break;
 		}
 	}

--- 439 unchanged lines hidden ---
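
The moea64_pvo_enter() hunk handles the case where a mapping is re-entered
with the same physical page and protection and only its wired attribute
changes: PVO_WIRED and LPTE_WIRED are toggled together with
pm_stats.wired_count, and the PTE image is rewritten only if a valid PTE
exists (pt != -1); no TLB invalidation is needed because wired is purely a
software attribute. A compact userspace model of that bookkeeping follows;
all names in it are invented for the sketch.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SOFT_WIRED	0x01	/* software-side flag (PVO_WIRED analogue) */
#define PTE_WIRED	0x02	/* copy kept in the PTE image (LPTE_WIRED) */

struct mapping {
	uint32_t soft_flags;
	uint32_t pte_flags;
	bool	 pte_valid;	/* is there a valid PTE image to resync? */
};

/* Returns true if the (valid) PTE image must be rewritten. */
static bool
set_wired(struct mapping *m, unsigned long *wired_count, bool wired)
{
	bool was_wired = (m->soft_flags & SOFT_WIRED) != 0;

	if (wired == was_wired)
		return (false);		/* nothing to do */
	if (wired) {
		m->soft_flags |= SOFT_WIRED;
		m->pte_flags |= PTE_WIRED;
		(*wired_count)++;
	} else {
		m->soft_flags &= ~SOFT_WIRED;
		m->pte_flags &= ~PTE_WIRED;
		(*wired_count)--;
	}
	/*
	 * Wired is not a hardware feature, so no TLB entries need to be
	 * invalidated; only a currently valid PTE image must be rewritten.
	 */
	return (m->pte_valid);
}

int
main(void)
{
	struct mapping m = { 0, 0, true };
	unsigned long wired_count = 0;

	assert(set_wired(&m, &wired_count, true));	/* must resync PTE */
	assert(wired_count == 1);
	assert(!set_wired(&m, &wired_count, true));	/* idempotent */
	assert(set_wired(&m, &wired_count, false));
	assert(wired_count == 0);
	return (0);
}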