/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>
#include <machine/trap.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
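
/*
 * Illustrative example (hypothetical values, not from the original source):
 * a VSID packs the 4-bit segment register number into its low nibble and a
 * 20-bit hash above it, so VSID_MAKE(0x3, 0x12345) == 0x123453, from which
 * VSID_TO_SR() recovers 0x3 and VSID_TO_HASH() recovers 0x12345.
 */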

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern int dumpsys_minidump;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;
struct mtx	moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */

static struct rwlock_padalign pvh_global_lock;

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static int		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, u_int, int8_t);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int);
static void		moea_kremove(mmu_t, vm_offset_t);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_clear_modify(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
    int8_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
int moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
vm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz);
struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);

static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
	MMUMETHOD(mmu_unwire,		moea_unwire),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
	MMUMETHOD(mmu_scan_md,		moea_scan_md),
	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);

static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo = PTE_M;
			break;
		}
	}

	return (pte_lo);
}

static void
tlbie(vm_offset_t va)
{

	mtx_lock_spin(&tlbie_mtx);
	__asm __volatile("ptesync");
	__asm __volatile("tlbie %0" :: "r"(va));
	__asm __volatile("eieio; tlbsync; ptesync");
	mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
	vm_offset_t va;

	for (va = 0; va < 0x00040000; va += 0x00001000) {
		__asm __volatile("tlbie %0" :: "r"(va));
		powerpc_sync();
	}
	__asm __volatile("tlbsync");
	powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}
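
/*
 * Illustrative example (hypothetical values): with a 512-entry table,
 * moea_pteg_mask == 0x1ff, so a masked segment VSID of 0x7777 and a page
 * index of 0x123 hash to (0x7777 ^ 0x123) & 0x1ff == 0x7654 & 0x1ff == 0x54;
 * that mapping's primary PTEG is slot 0x54.
 */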

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}
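
/*
 * Illustrative example (hypothetical values, assuming PTE_VSID_SHFT == 7
 * and a 6-bit PTE_API field as on 32-bit OEA): a masked VSID of 0x2 and
 * API bits of 0x1f produce pte_hi == (0x2 << 7) | 0x1f == 0x11f, with
 * PTE_VALID still clear until moea_pte_set() installs the entry.
 */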

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	tlbie(va);
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	powerpc_sync();
	pt->pte_hi = pvo_pt->pte_hi;
	powerpc_sync();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	powerpc_sync();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	tlbie(va);

	/*
	 * Save the reg & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_cpu_bootstrap(mmu_t mmup, int ap)
{
	u_int sdr;
	int i;

	if (ap) {
		powerpc_sync();
		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
		isync();
		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
		isync();
	}

#ifdef WII
	/*
	 * Special case for the Wii: don't install the PCI BAT.
	 */
	if (strcmp(installed_platform(), "wii") != 0) {
#endif
		__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
		__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
#ifdef WII
	}
#endif
	isync();

	__asm __volatile("mtibatu 1,%0" :: "r"(0));
	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
	__asm __volatile("mtibatu 2,%0" :: "r"(0));
	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
	__asm __volatile("mtibatu 3,%0" :: "r"(0));
	isync();

	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	powerpc_sync();

	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	void		*dpcpu;
	register_t	msr;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are. Turn off instruction relocation temporarily
	 * to prevent faults while reprogramming the IBAT.
	 */
	msr = mfmsr();
	mtmsr(msr & ~PSL_IR);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(battable[0].batu), "r"(battable[0].batl));
	mtmsr(msr);

#ifdef WII
	if (strcmp(installed_platform(), "wii") != 0) {
#endif
		/* map pci space */
		__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
		__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
#ifdef WII
	}
#endif
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*
	 * Set up the Open Firmware mappings
	 */
	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
	    (mmu = OF_instance_to_package(mmui)) != -1 &&
	    (sz = OF_getproplen(mmu, "translations")) != -1) {
		translations = NULL;
		for (i = 0; phys_avail[i] != 0; i += 2) {
			if (phys_avail[i + 1] >= sz) {
				translations = (struct ofw_map *)phys_avail[i];
				break;
			}
		}
		if (translations == NULL)
			panic("moea_bootstrap: no space to copy translations");
		bzero(translations, sz);
		if (OF_getprop(mmu, "translations", translations, sz) == -1)
			panic("moea_bootstrap: can't get ofw translations");
		CTR0(KTR_PMAP, "moea_bootstrap: translations");
		sz /= sizeof(*translations);
		qsort(translations, sz, sizeof (*translations), om_cmp);
		for (i = 0; i < sz; i++) {
			CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
			    translations[i].om_pa, translations[i].om_va,
			    translations[i].om_len);

			/*
			 * If the mapping is 1:1, let the RAM and device
			 * on-demand BAT tables take care of the translation.
			 */
			if (translations[i].om_va == translations[i].om_pa)
				continue;

			/* Enter the pages */
			for (off = 0; off < translations[i].om_len;
			    off += PAGE_SIZE)
				moea_kenter(mmup, translations[i].om_va + off,
					    translations[i].om_pa + off);
		}
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	moea_cpu_bootstrap(mmup, 0);

	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, NULL);
}

void
moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

void
moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(pa + off));
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
int
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{
	int error;

	for (;;) {
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
		rw_wunlock(&pvh_global_lock);
		PMAP_UNLOCK(pmap);
		if (error != ENOMEM)
			return (KERN_SUCCESS);
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (KERN_RESOURCE_SHORTAGE);
		VM_OBJECT_ASSERT_UNLOCKED(m->object);
		VM_WAIT;
	}
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static int
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind __unused)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}
	if (pmap_bootstrapped)
		rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache. This has to be done
1184233949Snwhitehorn	 * for all user mappings to prevent information leakage via the
1185234149Snwhitehorn	 * instruction cache. moea_pvo_enter() returns ENOENT for the first
1186234149Snwhitehorn	 * mapping for a page.
118796250Sbenno	 */
1188234149Snwhitehorn	if (pmap != kernel_pmap && error == ENOENT &&
1189234149Snwhitehorn	    (pte_lo & (PTE_I | PTE_G)) == 0)
1190152180Sgrehan		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1191270439Skib
1192270439Skib	return (error);
119377957Sbenno}
119477957Sbenno
1195159303Salc/*
1196159303Salc * Maps a sequence of resident pages belonging to the same object.
1197159303Salc * The sequence begins with the given page m_start.  This page is
1198159303Salc * mapped at the given virtual address start.  Each subsequent page is
1199159303Salc * mapped at a virtual address that is offset from start by the same
1200159303Salc * amount as the page is offset from m_start within the object.  The
1201159303Salc * last page in the sequence is the page with the largest offset from
1202159303Salc * m_start that can be mapped at a virtual address less than the given
1203159303Salc * virtual address end.  Not every virtual page between start and end
1204159303Salc * is mapped; only those for which a resident page exists with the
1205159303Salc * corresponding offset from m_start are mapped.
1206159303Salc */
1207159303Salcvoid
1208159303Salcmoea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1209159303Salc    vm_page_t m_start, vm_prot_t prot)
1210159303Salc{
1211159303Salc	vm_page_t m;
1212159303Salc	vm_pindex_t diff, psize;
1213159303Salc
1214250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1215250884Sattilio
1216159303Salc	psize = atop(end - start);
1217159303Salc	m = m_start;
1218238159Salc	rw_wlock(&pvh_global_lock);
1219159303Salc	PMAP_LOCK(pm);
1220159303Salc	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1221159303Salc		moea_enter_locked(pm, start + ptoa(diff), m, prot &
1222270439Skib		    (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
1223159303Salc		m = TAILQ_NEXT(m, listq);
1224159303Salc	}
1225238159Salc	rw_wunlock(&pvh_global_lock);
1226159303Salc	PMAP_UNLOCK(pm);
1227159303Salc}
1228159303Salc
1229159627Supsvoid
1230152180Sgrehanmoea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1231159627Sups    vm_prot_t prot)
1232117045Salc{
1233117045Salc
1234238159Salc	rw_wlock(&pvh_global_lock);
1235159303Salc	PMAP_LOCK(pm);
1236159303Salc	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1237270439Skib	    0, 0);
1238238159Salc	rw_wunlock(&pvh_global_lock);
1239159303Salc	PMAP_UNLOCK(pm);
1240117045Salc}
1241117045Salc
1242131658Salcvm_paddr_t
1243152180Sgrehanmoea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
124477957Sbenno{
124596353Sbenno	struct	pvo_entry *pvo;
1246134329Salc	vm_paddr_t pa;
124796353Sbenno
1248134329Salc	PMAP_LOCK(pm);
1249152180Sgrehan	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1250134329Salc	if (pvo == NULL)
1251134329Salc		pa = 0;
1252134329Salc	else
1253183290Snwhitehorn		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1254134329Salc	PMAP_UNLOCK(pm);
1255134329Salc	return (pa);
125677957Sbenno}
125777957Sbenno
125877957Sbenno/*
1259120336Sgrehan * Atomically extract and hold the physical page with the given
1260120336Sgrehan * pmap and virtual address pair if that mapping permits the given
1261120336Sgrehan * protection.
1262120336Sgrehan */
1263120336Sgrehanvm_page_t
1264152180Sgrehanmoea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1265120336Sgrehan{
1266132666Salc	struct	pvo_entry *pvo;
1267120336Sgrehan	vm_page_t m;
1268207410Skmacy        vm_paddr_t pa;
1269207410Skmacy
1270120336Sgrehan	m = NULL;
1271207410Skmacy	pa = 0;
1272134329Salc	PMAP_LOCK(pmap);
1273207410Skmacyretry:
1274152180Sgrehan	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1275183290Snwhitehorn	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
1276183290Snwhitehorn	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
1277132666Salc	     (prot & VM_PROT_WRITE) == 0)) {
1278207410Skmacy		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
1279207410Skmacy			goto retry;
1280183290Snwhitehorn		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
1281120336Sgrehan		vm_page_hold(m);
1282120336Sgrehan	}
1283207410Skmacy	PA_UNLOCK_COND(pa);
1284134329Salc	PMAP_UNLOCK(pmap);
1285120336Sgrehan	return (m);
1286120336Sgrehan}
1287120336Sgrehan
128890643Sbennovoid
1289152180Sgrehanmoea_init(mmu_t mmu)
129077957Sbenno{
129177957Sbenno
1292152180Sgrehan	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1293125442Sgrehan	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1294125442Sgrehan	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1295152180Sgrehan	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1296125442Sgrehan	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1297125442Sgrehan	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1298152180Sgrehan	moea_initialized = TRUE;
129977957Sbenno}
130077957Sbenno
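/*
 * Return whether any mapping of the given page has the referenced
 * (PTE_REF) attribute set.
 */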
130190643Sbennoboolean_t
1302207155Salcmoea_is_referenced(mmu_t mmu, vm_page_t m)
1303207155Salc{
1304238357Salc	boolean_t rv;
1305207155Salc
1306224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1307208574Salc	    ("moea_is_referenced: page %p is not managed", m));
1308238357Salc	rw_wlock(&pvh_global_lock);
1309238357Salc	rv = moea_query_bit(m, PTE_REF);
1310238357Salc	rw_wunlock(&pvh_global_lock);
1311238357Salc	return (rv);
1312207155Salc}
1313207155Salc
1314207155Salcboolean_t
1315152180Sgrehanmoea_is_modified(mmu_t mmu, vm_page_t m)
131690643Sbenno{
1317238357Salc	boolean_t rv;
131896353Sbenno
1319224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1320208504Salc	    ("moea_is_modified: page %p is not managed", m));
1321208504Salc
1322208504Salc	/*
1323254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1324225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1325208504Salc	 * is clear, no PTEs can have PTE_CHG set.
1326208504Salc	 */
1327248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1328254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
132996353Sbenno		return (FALSE);
1330238357Salc	rw_wlock(&pvh_global_lock);
1331238357Salc	rv = moea_query_bit(m, PTE_CHG);
1332238357Salc	rw_wunlock(&pvh_global_lock);
1333238357Salc	return (rv);
133490643Sbenno}
133590643Sbenno
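/*
 * Return whether the given address is a candidate for prefaulting,
 * that is, whether no valid mapping for it exists in the given pmap.
 */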
1336214617Salcboolean_t
1337214617Salcmoea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1338214617Salc{
1339214617Salc	struct pvo_entry *pvo;
1340214617Salc	boolean_t rv;
1341214617Salc
1342214617Salc	PMAP_LOCK(pmap);
1343214617Salc	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1344214617Salc	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
1345214617Salc	PMAP_UNLOCK(pmap);
1346214617Salc	return (rv);
1347214617Salc}
1348214617Salc
134990643Sbennovoid
1350152180Sgrehanmoea_clear_modify(mmu_t mmu, vm_page_t m)
1351110172Sgrehan{
1352110172Sgrehan
1353224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1354208504Salc	    ("moea_clear_modify: page %p is not managed", m));
1355248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1356254138Sattilio	KASSERT(!vm_page_xbusied(m),
1357254138Sattilio	    ("moea_clear_modify: page %p is exclusive busy", m));
1358208504Salc
1359208504Salc	/*
1360225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
1361208504Salc	 * set.  If the object containing the page is locked and the page is
1362254138Sattilio	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1363208504Salc	 */
1364225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
1365110172Sgrehan		return;
1366238357Salc	rw_wlock(&pvh_global_lock);
1367208990Salc	moea_clear_bit(m, PTE_CHG);
1368238357Salc	rw_wunlock(&pvh_global_lock);
1369110172Sgrehan}
1370110172Sgrehan
137191403Ssilby/*
1372160889Salc * Clear the write and modified bits in each of the given page's mappings.
1373160889Salc */
1374160889Salcvoid
1375160889Salcmoea_remove_write(mmu_t mmu, vm_page_t m)
1376160889Salc{
1377160889Salc	struct	pvo_entry *pvo;
1378160889Salc	struct	pte *pt;
1379160889Salc	pmap_t	pmap;
1380160889Salc	u_int	lo;
1381160889Salc
1382224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1383208175Salc	    ("moea_remove_write: page %p is not managed", m));
1384208175Salc
1385208175Salc	/*
1386254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1387254138Sattilio	 * set by another thread while the object is locked.  Thus,
1388254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1389208175Salc	 */
1390248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1391254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1392160889Salc		return;
1393238159Salc	rw_wlock(&pvh_global_lock);
1394160889Salc	lo = moea_attr_fetch(m);
1395183094Smarcel	powerpc_sync();
1396160889Salc	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1397160889Salc		pmap = pvo->pvo_pmap;
1398160889Salc		PMAP_LOCK(pmap);
1399183290Snwhitehorn		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
1400160889Salc			pt = moea_pvo_to_pte(pvo, -1);
1401183290Snwhitehorn			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
1402183290Snwhitehorn			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
1403160889Salc			if (pt != NULL) {
1404183290Snwhitehorn				moea_pte_synch(pt, &pvo->pvo_pte.pte);
1405183290Snwhitehorn				lo |= pvo->pvo_pte.pte.pte_lo;
1406183290Snwhitehorn				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
1407183290Snwhitehorn				moea_pte_change(pt, &pvo->pvo_pte.pte,
1408160889Salc				    pvo->pvo_vaddr);
1409160889Salc				mtx_unlock(&moea_table_mutex);
1410160889Salc			}
1411160889Salc		}
1412160889Salc		PMAP_UNLOCK(pmap);
1413160889Salc	}
1414160889Salc	if ((lo & PTE_CHG) != 0) {
1415160889Salc		moea_attr_clear(m, PTE_CHG);
1416160889Salc		vm_page_dirty(m);
1417160889Salc	}
1418225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1419238159Salc	rw_wunlock(&pvh_global_lock);
1420160889Salc}
1421160889Salc
1422160889Salc/*
1423152180Sgrehan *	moea_ts_referenced:
142491403Ssilby *
142591403Ssilby *	Return a count of reference bits for a page, clearing those bits.
142691403Ssilby *	It is not necessary for every reference bit to be cleared, but it
142791403Ssilby *	is necessary that 0 only be returned when there are truly no
142891403Ssilby *	reference bits set.
142991403Ssilby *
143091403Ssilby *	XXX: The exact number of bits to check and clear is a matter that
143191403Ssilby *	should be tested and standardized at some point in the future for
143291403Ssilby *	optimal aging of shared pages.
143391403Ssilby */
1434238357Salcint
1435152180Sgrehanmoea_ts_referenced(mmu_t mmu, vm_page_t m)
143690643Sbenno{
1437238357Salc	int count;
1438110172Sgrehan
1439224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1440208990Salc	    ("moea_ts_referenced: page %p is not managed", m));
1441238357Salc	rw_wlock(&pvh_global_lock);
1442238357Salc	count = moea_clear_bit(m, PTE_REF);
1443238357Salc	rw_wunlock(&pvh_global_lock);
1444238357Salc	return (count);
144590643Sbenno}
144690643Sbenno
144777957Sbenno/*
1448213307Snwhitehorn * Modify the WIMG settings of all mappings for a page.
1449213307Snwhitehorn */
1450213307Snwhitehornvoid
1451213307Snwhitehornmoea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1452213307Snwhitehorn{
1453213307Snwhitehorn	struct	pvo_entry *pvo;
1454213335Snwhitehorn	struct	pvo_head *pvo_head;
1455213307Snwhitehorn	struct	pte *pt;
1456213307Snwhitehorn	pmap_t	pmap;
1457213307Snwhitehorn	u_int	lo;
1458213307Snwhitehorn
1459224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1460213335Snwhitehorn		m->md.mdpg_cache_attrs = ma;
1461213335Snwhitehorn		return;
1462213335Snwhitehorn	}
1463213335Snwhitehorn
1464238159Salc	rw_wlock(&pvh_global_lock);
1465213335Snwhitehorn	pvo_head = vm_page_to_pvoh(m);
1466213307Snwhitehorn	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1467213335Snwhitehorn
1468213335Snwhitehorn	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1469213307Snwhitehorn		pmap = pvo->pvo_pmap;
1470213307Snwhitehorn		PMAP_LOCK(pmap);
1471213307Snwhitehorn		pt = moea_pvo_to_pte(pvo, -1);
1472213307Snwhitehorn		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
1473213307Snwhitehorn		pvo->pvo_pte.pte.pte_lo |= lo;
1474213307Snwhitehorn		if (pt != NULL) {
1475213307Snwhitehorn			moea_pte_change(pt, &pvo->pvo_pte.pte,
1476213307Snwhitehorn			    pvo->pvo_vaddr);
1477213307Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1478213307Snwhitehorn				isync();
1479213307Snwhitehorn		}
1480213307Snwhitehorn		mtx_unlock(&moea_table_mutex);
1481213307Snwhitehorn		PMAP_UNLOCK(pmap);
1482213307Snwhitehorn	}
1483213307Snwhitehorn	m->md.mdpg_cache_attrs = ma;
1484238159Salc	rw_wunlock(&pvh_global_lock);
1485213307Snwhitehorn}
1486213307Snwhitehorn
1487213307Snwhitehorn/*
148890643Sbenno * Map a wired page into kernel virtual address space.
148977957Sbenno */
149077957Sbennovoid
1491235936Srajmoea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
149277957Sbenno{
1493213307Snwhitehorn
1494213307Snwhitehorn	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1495213307Snwhitehorn}
1496213307Snwhitehorn
1497213307Snwhitehornvoid
1498213307Snwhitehornmoea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1499213307Snwhitehorn{
150090643Sbenno	u_int		pte_lo;
150190643Sbenno	int		error;
150277957Sbenno
150390643Sbenno#if 0
150490643Sbenno	if (va < VM_MIN_KERNEL_ADDRESS)
1505152180Sgrehan		panic("moea_kenter: attempt to enter non-kernel address %#x",
150690643Sbenno		    va);
150790643Sbenno#endif
150877957Sbenno
1509213307Snwhitehorn	pte_lo = moea_calc_wimg(pa, ma);
151077957Sbenno
1511135172Salc	PMAP_LOCK(kernel_pmap);
1512152180Sgrehan	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
1513152180Sgrehan	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
151490643Sbenno
151590643Sbenno	if (error != 0 && error != ENOENT)
1516152180Sgrehan		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
151790643Sbenno		    pa, error);
151890643Sbenno
1519135172Salc	PMAP_UNLOCK(kernel_pmap);
152077957Sbenno}
152177957Sbenno
152294838Sbenno/*
152394838Sbenno * Extract the physical page address associated with the given kernel virtual
152494838Sbenno * address.
152594838Sbenno */
1526235936Srajvm_paddr_t
1527152180Sgrehanmoea_kextract(mmu_t mmu, vm_offset_t va)
152877957Sbenno{
152994838Sbenno	struct		pvo_entry *pvo;
1530134329Salc	vm_paddr_t pa;
153194838Sbenno
1532125185Sgrehan	/*
1533183290Snwhitehorn	 * Allow direct mappings on 32-bit OEA
1534125185Sgrehan	 */
1535125185Sgrehan	if (va < VM_MIN_KERNEL_ADDRESS) {
1536125185Sgrehan		return (va);
1537125185Sgrehan	}
1538125185Sgrehan
1539134329Salc	PMAP_LOCK(kernel_pmap);
1540152180Sgrehan	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1541152180Sgrehan	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
1542183290Snwhitehorn	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1543134329Salc	PMAP_UNLOCK(kernel_pmap);
1544134329Salc	return (pa);
154577957Sbenno}
154677957Sbenno
154791456Sbenno/*
154891456Sbenno * Remove a wired page from kernel virtual address space.
154991456Sbenno */
155077957Sbennovoid
1551152180Sgrehanmoea_kremove(mmu_t mmu, vm_offset_t va)
155277957Sbenno{
155391456Sbenno
1554152180Sgrehan	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
155577957Sbenno}
155677957Sbenno
155777957Sbenno/*
155890643Sbenno * Map a range of physical addresses into kernel virtual address space.
155990643Sbenno *
156090643Sbenno * The value passed in *virt is a suggested virtual address for the mapping.
156190643Sbenno * Architectures which can support a direct-mapped physical to virtual region
156290643Sbenno * can return the appropriate address within that region, leaving '*virt'
156390643Sbenno * unchanged.  We cannot and therefore do not; *virt is updated with the
156490643Sbenno * first usable address after the mapped region.
156577957Sbenno */
156690643Sbennovm_offset_t
1567235936Srajmoea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1568235936Sraj    vm_paddr_t pa_end, int prot)
156977957Sbenno{
157090643Sbenno	vm_offset_t	sva, va;
157177957Sbenno
157290643Sbenno	sva = *virt;
157390643Sbenno	va = sva;
157490643Sbenno	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1575152180Sgrehan		moea_kenter(mmu, va, pa_start);
157690643Sbenno	*virt = va;
157790643Sbenno	return (sva);
157877957Sbenno}
157977957Sbenno
158077957Sbenno/*
158191403Ssilby * Returns true if the pmap's pv is one of the first
158291403Ssilby * 16 pvs linked to from this page.  This count may
158391403Ssilby * be changed upwards or downwards in the future; it
158491403Ssilby * is only necessary that true be returned for a small
158591403Ssilby * subset of pmaps for proper page aging.
158691403Ssilby */
158790643Sbennoboolean_t
1588152180Sgrehanmoea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
158990643Sbenno{
1590110172Sgrehan	int loops;
1591110172Sgrehan	struct pvo_entry *pvo;
1592208990Salc	boolean_t rv;
1593110172Sgrehan
1594224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1595208990Salc	    ("moea_page_exists_quick: page %p is not managed", m));
1596110172Sgrehan	loops = 0;
1597208990Salc	rv = FALSE;
1598238159Salc	rw_wlock(&pvh_global_lock);
1599110172Sgrehan	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1600208990Salc		if (pvo->pvo_pmap == pmap) {
1601208990Salc			rv = TRUE;
1602208990Salc			break;
1603208990Salc		}
1604110172Sgrehan		if (++loops >= 16)
1605110172Sgrehan			break;
1606110172Sgrehan	}
1607238159Salc	rw_wunlock(&pvh_global_lock);
1608208990Salc	return (rv);
160990643Sbenno}
161077957Sbenno
1611173708Salc/*
1612173708Salc * Return the number of managed mappings to the given physical page
1613173708Salc * that are wired.
1614173708Salc */
1615173708Salcint
1616173708Salcmoea_page_wired_mappings(mmu_t mmu, vm_page_t m)
1617173708Salc{
1618173708Salc	struct pvo_entry *pvo;
1619173708Salc	int count;
1620173708Salc
1621173708Salc	count = 0;
1622224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1623173708Salc		return (count);
1624238159Salc	rw_wlock(&pvh_global_lock);
1625173708Salc	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1626173708Salc		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1627173708Salc			count++;
1628238159Salc	rw_wunlock(&pvh_global_lock);
1629173708Salc	return (count);
1630173708Salc}
1631173708Salc
1632152180Sgrehanstatic u_int	moea_vsidcontext;
163377957Sbenno
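/*
 * Initialize a preallocated and zeroed pmap structure, such as one in a
 * vmspace structure: set up its PVO tree and allocate a group of VSIDs
 * for its segment registers.
 */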
163490643Sbennovoid
1635152180Sgrehanmoea_pinit(mmu_t mmu, pmap_t pmap)
163690643Sbenno{
163790643Sbenno	int	i, mask;
163890643Sbenno	u_int	entropy;
163977957Sbenno
1640152180Sgrehan	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
1641235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1642126478Sgrehan
164390643Sbenno	entropy = 0;
164490643Sbenno	__asm __volatile("mftb %0" : "=r"(entropy));
164577957Sbenno
1646183290Snwhitehorn	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
1647183290Snwhitehorn	    == NULL) {
1648183290Snwhitehorn		pmap->pmap_phys = pmap;
1649183290Snwhitehorn	}
1650183290Snwhitehorn
1652212278Snwhitehorn	mtx_lock(&moea_vsid_mutex);
165390643Sbenno	/*
165490643Sbenno	 * Allocate some segment registers for this pmap.
165590643Sbenno	 */
165690643Sbenno	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
165790643Sbenno		u_int	hash, n;
165877957Sbenno
165977957Sbenno		/*
166090643Sbenno		 * Create a new value by multiplying by a prime and adding in
166190643Sbenno		 * entropy from the timebase register.  This is to make the
166290643Sbenno		 * VSID more random so that the PT hash function collides
166390643Sbenno		 * less often.  (Note that the prime causes gcc to do shifts
166490643Sbenno		 * instead of a multiply.)
166577957Sbenno		 */
1666152180Sgrehan		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
1667152180Sgrehan		hash = moea_vsidcontext & (NPMAPS - 1);
166890643Sbenno		if (hash == 0)		/* 0 is special, avoid it */
166990643Sbenno			continue;
167090643Sbenno		n = hash >> 5;
167190643Sbenno		mask = 1 << (hash & (VSID_NBPW - 1));
1672152180Sgrehan		hash = (moea_vsidcontext & 0xfffff);
1673152180Sgrehan		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
167490643Sbenno			/* anything free in this bucket? */
1675152180Sgrehan			if (moea_vsid_bitmap[n] == 0xffffffff) {
1676152180Sgrehan				entropy = (moea_vsidcontext >> 20);
167790643Sbenno				continue;
167890643Sbenno			}
1679212322Snwhitehorn			i = ffs(~moea_vsid_bitmap[n]) - 1;
168090643Sbenno			mask = 1 << i;
168190643Sbenno			hash &= 0xfffff & ~(VSID_NBPW - 1);
168290643Sbenno			hash |= i;
168377957Sbenno		}
1684227627Snwhitehorn		KASSERT(!(moea_vsid_bitmap[n] & mask),
1685227627Snwhitehorn		    ("Allocating in-use VSID group %#x\n", hash));
1686152180Sgrehan		moea_vsid_bitmap[n] |= mask;
168790643Sbenno		for (i = 0; i < 16; i++)
168890643Sbenno			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1689212278Snwhitehorn		mtx_unlock(&moea_vsid_mutex);
169090643Sbenno		return;
169190643Sbenno	}
169277957Sbenno
1693212278Snwhitehorn	mtx_unlock(&moea_vsid_mutex);
1694152180Sgrehan	panic("moea_pinit: out of segments");
169577957Sbenno}
169677957Sbenno
169777957Sbenno/*
169890643Sbenno * Initialize the pmap associated with process 0.
169977957Sbenno */
170077957Sbennovoid
1701152180Sgrehanmoea_pinit0(mmu_t mmu, pmap_t pm)
170277957Sbenno{
170377957Sbenno
1704254667Skib	PMAP_LOCK_INIT(pm);
1705152180Sgrehan	moea_pinit(mmu, pm);
170690643Sbenno	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
170777957Sbenno}
170877957Sbenno
170994838Sbenno/*
171094838Sbenno * Set the physical protection on the specified range of this map as requested.
171194838Sbenno */
171290643Sbennovoid
1713152180Sgrehanmoea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1714152180Sgrehan    vm_prot_t prot)
171590643Sbenno{
1716235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
171794838Sbenno	struct	pte *pt;
171894838Sbenno
171994838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1720152180Sgrehan	    ("moea_protect: non current pmap"));
172194838Sbenno
172294838Sbenno	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1723152180Sgrehan		moea_remove(mmu, pm, sva, eva);
172494838Sbenno		return;
172594838Sbenno	}
172694838Sbenno
1727238159Salc	rw_wlock(&pvh_global_lock);
1728134329Salc	PMAP_LOCK(pm);
1729235689Snwhitehorn	key.pvo_vaddr = sva;
1730235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1731235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
1732235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
173394838Sbenno
173494838Sbenno		/*
173594838Sbenno		 * Grab the PTE pointer before we diddle with the cached PTE
173694838Sbenno		 * copy.
173794838Sbenno		 */
1738235689Snwhitehorn		pt = moea_pvo_to_pte(pvo, -1);
173994838Sbenno		/*
174094838Sbenno		 * Change the protection of the page.
174194838Sbenno		 */
1742183290Snwhitehorn		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
1743183290Snwhitehorn		pvo->pvo_pte.pte.pte_lo |= PTE_BR;
174494838Sbenno
174594838Sbenno		/*
174694838Sbenno		 * If the PVO is in the page table, update that pte as well.
174794838Sbenno		 */
1748159928Salc		if (pt != NULL) {
1749183290Snwhitehorn			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
1750159928Salc			mtx_unlock(&moea_table_mutex);
1751159928Salc		}
175294838Sbenno	}
1753238159Salc	rw_wunlock(&pvh_global_lock);
1754134329Salc	PMAP_UNLOCK(pm);
175577957Sbenno}
175677957Sbenno
175791456Sbenno/*
175891456Sbenno * Map a list of wired pages into kernel virtual address space.  This is
175991456Sbenno * intended for temporary mappings which do not need page modification or
176091456Sbenno * references recorded.  Existing mappings in the region are overwritten.
176191456Sbenno */
176290643Sbennovoid
1763152180Sgrehanmoea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
176477957Sbenno{
1765110172Sgrehan	vm_offset_t va;
176677957Sbenno
1767110172Sgrehan	va = sva;
1768110172Sgrehan	while (count-- > 0) {
1769152180Sgrehan		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1770110172Sgrehan		va += PAGE_SIZE;
1771110172Sgrehan		m++;
1772110172Sgrehan	}
177390643Sbenno}
177477957Sbenno
177591456Sbenno/*
177691456Sbenno * Remove page mappings from kernel virtual address space.  Intended for
1777152180Sgrehan * temporary mappings entered by moea_qenter.
177891456Sbenno */
177990643Sbennovoid
1780152180Sgrehanmoea_qremove(mmu_t mmu, vm_offset_t sva, int count)
178190643Sbenno{
1782110172Sgrehan	vm_offset_t va;
178391456Sbenno
1784110172Sgrehan	va = sva;
1785110172Sgrehan	while (count-- > 0) {
1786152180Sgrehan		moea_kremove(mmu, va);
1787110172Sgrehan		va += PAGE_SIZE;
1788110172Sgrehan	}
178977957Sbenno}
179077957Sbenno
179190643Sbennovoid
1792152180Sgrehanmoea_release(mmu_t mmu, pmap_t pmap)
179390643Sbenno{
1794103604Sgrehan	int idx, mask;
1795103604Sgrehan
1796103604Sgrehan	/*
1797103604Sgrehan	 * Free the VSID allocated for this pmap's segment registers.
1798103604Sgrehan	 */
1799103604Sgrehan	if (pmap->pm_sr[0] == 0)
1800152180Sgrehan		panic("moea_release");
1801103604Sgrehan
1802212278Snwhitehorn	mtx_lock(&moea_vsid_mutex);
1803103604Sgrehan	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1804103604Sgrehan	mask = 1 << (idx % VSID_NBPW);
1805103604Sgrehan	idx /= VSID_NBPW;
1806152180Sgrehan	moea_vsid_bitmap[idx] &= ~mask;
1807212278Snwhitehorn	mtx_unlock(&moea_vsid_mutex);
180877957Sbenno}
180977957Sbenno
181091456Sbenno/*
181191456Sbenno * Remove the given range of addresses from the specified map.
181291456Sbenno */
181390643Sbennovoid
1814152180Sgrehanmoea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
181577957Sbenno{
1816235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
181791456Sbenno
1818238159Salc	rw_wlock(&pvh_global_lock);
1819134329Salc	PMAP_LOCK(pm);
1820235689Snwhitehorn	key.pvo_vaddr = sva;
1821235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1822235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
1823235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
1824235689Snwhitehorn		moea_pvo_remove(pvo, -1);
182591456Sbenno	}
1826140538Sgrehan	PMAP_UNLOCK(pm);
1827238159Salc	rw_wunlock(&pvh_global_lock);
182877957Sbenno}
182977957Sbenno
183094838Sbenno/*
1831152180Sgrehan * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
1832110172Sgrehan * will reflect changes in pte's back to the vm_page.
1833110172Sgrehan */
1834110172Sgrehanvoid
1835152180Sgrehanmoea_remove_all(mmu_t mmu, vm_page_t m)
1836110172Sgrehan{
1837110172Sgrehan	struct  pvo_head *pvo_head;
1838110172Sgrehan	struct	pvo_entry *pvo, *next_pvo;
1839134329Salc	pmap_t	pmap;
1840110172Sgrehan
1841238159Salc	rw_wlock(&pvh_global_lock);
1842110172Sgrehan	pvo_head = vm_page_to_pvoh(m);
1843110172Sgrehan	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1844110172Sgrehan		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1845133166Sgrehan
1846134329Salc		pmap = pvo->pvo_pmap;
1847134329Salc		PMAP_LOCK(pmap);
1848152180Sgrehan		moea_pvo_remove(pvo, -1);
1849134329Salc		PMAP_UNLOCK(pmap);
1850110172Sgrehan	}
1851238357Salc	if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
1852208847Snwhitehorn		moea_attr_clear(m, PTE_CHG);
1853204042Snwhitehorn		vm_page_dirty(m);
1854204042Snwhitehorn	}
1855225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1856238159Salc	rw_wunlock(&pvh_global_lock);
1857110172Sgrehan}
1858110172Sgrehan
1859110172Sgrehan/*
186090643Sbenno * Allocate a physical page of memory directly from the phys_avail map.
1861152180Sgrehan * Can only be called from moea_bootstrap before avail start and end are
186290643Sbenno * calculated.
186383682Smp */
186490643Sbennostatic vm_offset_t
1865152180Sgrehanmoea_bootstrap_alloc(vm_size_t size, u_int align)
186683682Smp{
186790643Sbenno	vm_offset_t	s, e;
186890643Sbenno	int		i, j;
186983682Smp
187090643Sbenno	size = round_page(size);
187190643Sbenno	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
187290643Sbenno		if (align != 0)
187390643Sbenno			s = (phys_avail[i] + align - 1) & ~(align - 1);
187490643Sbenno		else
187590643Sbenno			s = phys_avail[i];
187690643Sbenno		e = s + size;
187790643Sbenno
187890643Sbenno		if (s < phys_avail[i] || e > phys_avail[i + 1])
187990643Sbenno			continue;
188090643Sbenno
188190643Sbenno		if (s == phys_avail[i]) {
188290643Sbenno			phys_avail[i] += size;
188390643Sbenno		} else if (e == phys_avail[i + 1]) {
188490643Sbenno			phys_avail[i + 1] -= size;
188590643Sbenno		} else {
188690643Sbenno			for (j = phys_avail_count * 2; j > i; j -= 2) {
188790643Sbenno				phys_avail[j] = phys_avail[j - 2];
188890643Sbenno				phys_avail[j + 1] = phys_avail[j - 1];
188990643Sbenno			}
189090643Sbenno
189190643Sbenno			phys_avail[i + 3] = phys_avail[i + 1];
189290643Sbenno			phys_avail[i + 1] = s;
189390643Sbenno			phys_avail[i + 2] = e;
189490643Sbenno			phys_avail_count++;
189590643Sbenno		}
189690643Sbenno
189790643Sbenno		return (s);
189883682Smp	}
1899152180Sgrehan	panic("moea_bootstrap_alloc: could not allocate memory");
190083682Smp}
190183682Smp
190290643Sbennostatic void
1903152180Sgrehanmoea_syncicache(vm_offset_t pa, vm_size_t len)
190477957Sbenno{
190590643Sbenno	__syncicache((void *)pa, len);
190690643Sbenno}
190777957Sbenno
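/*
 * Create a PVO describing the mapping of pa at va in the given pmap and
 * insert its PTE into the page table.  Returns 0 on success, ENOENT if
 * the mapping is the first one for the page, and ENOMEM if no PVO could
 * be allocated.
 */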
190890643Sbennostatic int
1909152180Sgrehanmoea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
191090643Sbenno    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
191177957Sbenno{
191290643Sbenno	struct	pvo_entry *pvo;
191390643Sbenno	u_int	sr;
191490643Sbenno	int	first;
191590643Sbenno	u_int	ptegidx;
191690643Sbenno	int	i;
1917103604Sgrehan	int     bootstrap;
191877957Sbenno
1919152180Sgrehan	moea_pvo_enter_calls++;
192096250Sbenno	first = 0;
1921103604Sgrehan	bootstrap = 0;
192290643Sbenno
192390643Sbenno	/*
192490643Sbenno	 * Compute the PTE Group index.
192590643Sbenno	 */
192690643Sbenno	va &= ~ADDR_POFF;
192790643Sbenno	sr = va_to_sr(pm->pm_sr, va);
192890643Sbenno	ptegidx = va_to_pteg(sr, va);
192990643Sbenno
193090643Sbenno	/*
193190643Sbenno	 * Remove any existing mapping for this page.  Reuse the pvo entry if
193290643Sbenno	 * there is a mapping.
193390643Sbenno	 */
1934152180Sgrehan	mtx_lock(&moea_table_mutex);
1935152180Sgrehan	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
193690643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1937183290Snwhitehorn			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
1938183290Snwhitehorn			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
193996334Sbenno			    (pte_lo & PTE_PP)) {
1940270920Skib				/*
1941270920Skib				 * The PTE is not changing.  Instead, this may
1942270920Skib				 * be a request to change the mapping's wired
1943270920Skib				 * attribute.
1944270920Skib				 */
1945152180Sgrehan				mtx_unlock(&moea_table_mutex);
1946270920Skib				if ((flags & PVO_WIRED) != 0 &&
1947270920Skib				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
1948270920Skib					pvo->pvo_vaddr |= PVO_WIRED;
1949270920Skib					pm->pm_stats.wired_count++;
1950270920Skib				} else if ((flags & PVO_WIRED) == 0 &&
1951270920Skib				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
1952270920Skib					pvo->pvo_vaddr &= ~PVO_WIRED;
1953270920Skib					pm->pm_stats.wired_count--;
1954270920Skib				}
195592521Sbenno				return (0);
195696334Sbenno			}
1957152180Sgrehan			moea_pvo_remove(pvo, -1);
195890643Sbenno			break;
195990643Sbenno		}
196090643Sbenno	}
196190643Sbenno
196290643Sbenno	/*
196390643Sbenno	 * If we aren't overwriting a mapping, try to allocate.
196490643Sbenno	 */
1965152180Sgrehan	if (moea_initialized) {
196692847Sjeff		pvo = uma_zalloc(zone, M_NOWAIT);
196792521Sbenno	} else {
1968152180Sgrehan		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
1969152180Sgrehan			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
1970152180Sgrehan			      moea_bpvo_pool_index, BPVO_POOL_SIZE,
197199037Sbenno			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
197292521Sbenno		}
1973152180Sgrehan		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
1974152180Sgrehan		moea_bpvo_pool_index++;
1975103604Sgrehan		bootstrap = 1;
197692521Sbenno	}
197790643Sbenno
197890643Sbenno	if (pvo == NULL) {
1979152180Sgrehan		mtx_unlock(&moea_table_mutex);
198090643Sbenno		return (ENOMEM);
198190643Sbenno	}
198290643Sbenno
1983152180Sgrehan	moea_pvo_entries++;
198490643Sbenno	pvo->pvo_vaddr = va;
198590643Sbenno	pvo->pvo_pmap = pm;
1986152180Sgrehan	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
198790643Sbenno	pvo->pvo_vaddr &= ~ADDR_POFF;
198890643Sbenno	if (flags & PVO_WIRED)
198990643Sbenno		pvo->pvo_vaddr |= PVO_WIRED;
1990152180Sgrehan	if (pvo_head != &moea_pvo_kunmanaged)
199190643Sbenno		pvo->pvo_vaddr |= PVO_MANAGED;
1992103604Sgrehan	if (bootstrap)
1993103604Sgrehan		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1994142416Sgrehan
1995183290Snwhitehorn	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);
199690643Sbenno
199790643Sbenno	/*
1998228412Snwhitehorn	 * Add to pmap list
1999228412Snwhitehorn	 */
2000235689Snwhitehorn	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2001228412Snwhitehorn
2002228412Snwhitehorn	/*
200390643Sbenno	 * Remember if the list was empty and therefore will be the first
200490643Sbenno	 * item.
200590643Sbenno	 */
200696250Sbenno	if (LIST_FIRST(pvo_head) == NULL)
200796250Sbenno		first = 1;
2008142416Sgrehan	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
200990643Sbenno
2010183290Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2011134453Salc		pm->pm_stats.wired_count++;
2012134453Salc	pm->pm_stats.resident_count++;
201390643Sbenno
2014183290Snwhitehorn	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
2015253976Sjhibbits	KASSERT(i < 8, ("Invalid PTE index"));
201690643Sbenno	if (i >= 0) {
201790643Sbenno		PVO_PTEGIDX_SET(pvo, i);
201890643Sbenno	} else {
2019152180Sgrehan		panic("moea_pvo_enter: overflow");
202190643Sbenno	}
2022152180Sgrehan	mtx_unlock(&moea_table_mutex);
202390643Sbenno
202490643Sbenno	return (first ? ENOENT : 0);
202577957Sbenno}
202677957Sbenno
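/*
 * Tear down the given PVO: invalidate its PTE if one is resident
 * (saving the REF/CHG bits), update the pmap statistics, cache the
 * attribute bits of managed pages, and unlink the PVO from the page and
 * pmap lists, freeing it unless it came from the bootstrap pool.
 */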
202790643Sbennostatic void
2028152180Sgrehanmoea_pvo_remove(struct pvo_entry *pvo, int pteidx)
202977957Sbenno{
203090643Sbenno	struct	pte *pt;
203177957Sbenno
203290643Sbenno	/*
203390643Sbenno	 * If there is an active pte entry, we need to deactivate it (and
203490643Sbenno	 * save the ref & chg bits).
203590643Sbenno	 */
2036152180Sgrehan	pt = moea_pvo_to_pte(pvo, pteidx);
203790643Sbenno	if (pt != NULL) {
2038183290Snwhitehorn		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
2039159928Salc		mtx_unlock(&moea_table_mutex);
204090643Sbenno		PVO_PTEGIDX_CLR(pvo);
204190643Sbenno	} else {
2042152180Sgrehan		moea_pte_overflow--;
2043142416Sgrehan	}
204490643Sbenno
204590643Sbenno	/*
204690643Sbenno	 * Update our statistics.
204790643Sbenno	 */
204890643Sbenno	pvo->pvo_pmap->pm_stats.resident_count--;
2049183290Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
205090643Sbenno		pvo->pvo_pmap->pm_stats.wired_count--;
205190643Sbenno
205290643Sbenno	/*
205390643Sbenno	 * Save the REF/CHG bits into their cache if the page is managed.
205490643Sbenno	 */
2055224746Skib	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
205690643Sbenno		struct	vm_page *pg;
205790643Sbenno
2058183290Snwhitehorn		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
205990643Sbenno		if (pg != NULL) {
2060183290Snwhitehorn			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
206190643Sbenno			    (PTE_REF | PTE_CHG));
206290643Sbenno		}
206390643Sbenno	}
206490643Sbenno
206590643Sbenno	/*
2066228412Snwhitehorn	 * Remove this PVO from the PV and pmap lists.
206790643Sbenno	 */
206890643Sbenno	LIST_REMOVE(pvo, pvo_vlink);
2069235689Snwhitehorn	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
207090643Sbenno
207190643Sbenno	/*
207290643Sbenno	 * Remove this from the overflow list and return it to the pool
207390643Sbenno	 * if we aren't going to reuse it.
207490643Sbenno	 */
207590643Sbenno	LIST_REMOVE(pvo, pvo_olink);
207692521Sbenno	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2077152180Sgrehan		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
2078152180Sgrehan		    moea_upvo_zone, pvo);
2079152180Sgrehan	moea_pvo_entries--;
2080152180Sgrehan	moea_pvo_remove_calls++;
208177957Sbenno}
208277957Sbenno
208390643Sbennostatic __inline int
2084152180Sgrehanmoea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
208577957Sbenno{
208690643Sbenno	int	pteidx;
208777957Sbenno
208890643Sbenno	/*
208990643Sbenno	 * We can find the actual pte entry without searching by grabbing
209090643Sbenno	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
209190643Sbenno	 * noticing the HID bit.
209290643Sbenno	 */
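	/*
	 * For example (hypothetical values): a PVO in slot 5 of primary
	 * PTEG 10 yields pteidx 10 * 8 + 5 = 85; had the PTE been
	 * installed via the secondary hash (PTE_HID set), the index is
	 * reflected into the other half of the table by the XOR with
	 * moea_pteg_mask * 8 below.
	 */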
209390643Sbenno	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
2094183290Snwhitehorn	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
2095152180Sgrehan		pteidx ^= moea_pteg_mask * 8;
209690643Sbenno
209790643Sbenno	return (pteidx);
209877957Sbenno}
209977957Sbenno
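/*
 * Look up the PVO for the given virtual address in the given pmap,
 * optionally returning the index of its PTE via pteidx_p.
 */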
210090643Sbennostatic struct pvo_entry *
2101152180Sgrehanmoea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
210277957Sbenno{
210390643Sbenno	struct	pvo_entry *pvo;
210490643Sbenno	int	ptegidx;
210590643Sbenno	u_int	sr;
210677957Sbenno
210790643Sbenno	va &= ~ADDR_POFF;
210890643Sbenno	sr = va_to_sr(pm->pm_sr, va);
210990643Sbenno	ptegidx = va_to_pteg(sr, va);
211090643Sbenno
2111152180Sgrehan	mtx_lock(&moea_table_mutex);
2112152180Sgrehan	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
211390643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
211490643Sbenno			if (pteidx_p)
2115152180Sgrehan				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
2116134535Salc			break;
211790643Sbenno		}
211890643Sbenno	}
2119152180Sgrehan	mtx_unlock(&moea_table_mutex);
212090643Sbenno
2121134535Salc	return (pvo);
212277957Sbenno}
212377957Sbenno
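/*
 * Return a pointer to the PVO's PTE in the page table, or NULL if the
 * PTE is not currently resident.  When a PTE is returned, the table
 * lock is held; the caller is responsible for dropping it.
 */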
212490643Sbennostatic struct pte *
2125152180Sgrehanmoea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
212677957Sbenno{
212790643Sbenno	struct	pte *pt;
212877957Sbenno
212990643Sbenno	/*
213090643Sbenno	 * If we haven't been supplied the ptegidx, calculate it.
213190643Sbenno	 */
213290643Sbenno	if (pteidx == -1) {
213390643Sbenno		int	ptegidx;
213490643Sbenno		u_int	sr;
213577957Sbenno
213690643Sbenno		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
213790643Sbenno		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
2138152180Sgrehan		pteidx = moea_pvo_pte_index(pvo, ptegidx);
213990643Sbenno	}
214090643Sbenno
2141152180Sgrehan	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
2142159928Salc	mtx_lock(&moea_table_mutex);
214390643Sbenno
2144183290Snwhitehorn	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
2145152180Sgrehan		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
214690643Sbenno		    "valid pte index", pvo);
214790643Sbenno	}
214890643Sbenno
2149183290Snwhitehorn	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
2150152180Sgrehan		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
215190643Sbenno		    "but no valid pte", pvo);
215290643Sbenno	}
215390643Sbenno
2154183290Snwhitehorn	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
2155183290Snwhitehorn		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
2156152180Sgrehan			panic("moea_pvo_to_pte: pvo %p has valid pte in "
2157152180Sgrehan			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
215877957Sbenno		}
215990643Sbenno
2160183290Snwhitehorn		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
216190643Sbenno		    != 0) {
2162152180Sgrehan			panic("moea_pvo_to_pte: pvo %p pte does not match "
2163152180Sgrehan			    "pte %p in moea_pteg_table", pvo, pt);
216490643Sbenno		}
216590643Sbenno
2166159928Salc		mtx_assert(&moea_table_mutex, MA_OWNED);
216790643Sbenno		return (pt);
216877957Sbenno	}
216977957Sbenno
2170183290Snwhitehorn	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
2171152180Sgrehan		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
2172253976Sjhibbits		    "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt,
		    pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
217390643Sbenno	}
217477957Sbenno
2175159928Salc	mtx_unlock(&moea_table_mutex);
217690643Sbenno	return (NULL);
217777957Sbenno}
217878880Sbenno
217978880Sbenno/*
218090643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c?
218178880Sbenno */
218290643Sbennoint
2183152180Sgrehanmoea_pte_spill(vm_offset_t addr)
218478880Sbenno{
218590643Sbenno	struct	pvo_entry *source_pvo, *victim_pvo;
218690643Sbenno	struct	pvo_entry *pvo;
218790643Sbenno	int	ptegidx, i, j;
218890643Sbenno	u_int	sr;
218990643Sbenno	struct	pteg *pteg;
219090643Sbenno	struct	pte *pt;
219178880Sbenno
2192152180Sgrehan	moea_pte_spills++;
219390643Sbenno
219494836Sbenno	sr = mfsrin(addr);
219590643Sbenno	ptegidx = va_to_pteg(sr, addr);
219690643Sbenno
219778880Sbenno	/*
219890643Sbenno	 * Have to substitute some entry.  Use the primary hash for this.
219990643Sbenno	 * Use low bits of timebase as random generator.
220078880Sbenno	 */
2201152180Sgrehan	pteg = &moea_pteg_table[ptegidx];
2202152180Sgrehan	mtx_lock(&moea_table_mutex);
220390643Sbenno	__asm __volatile("mftb %0" : "=r"(i));
220490643Sbenno	i &= 7;
220590643Sbenno	pt = &pteg->pt[i];
220678880Sbenno
220790643Sbenno	source_pvo = NULL;
220890643Sbenno	victim_pvo = NULL;
2209152180Sgrehan	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
221078880Sbenno		/*
221190643Sbenno		 * We need to find a pvo entry for this address.
221278880Sbenno		 */
221390643Sbenno		if (source_pvo == NULL &&
2214183290Snwhitehorn		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
2215183290Snwhitehorn		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
221690643Sbenno			/*
221790643Sbenno			 * Now found an entry to be spilled into the pteg.
221890643Sbenno			 * The PTE is now valid, so we know it's active.
221990643Sbenno			 */
2220183290Snwhitehorn			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
222178880Sbenno
222290643Sbenno			if (j >= 0) {
222390643Sbenno				PVO_PTEGIDX_SET(pvo, j);
2224152180Sgrehan				moea_pte_overflow--;
2225152180Sgrehan				mtx_unlock(&moea_table_mutex);
222690643Sbenno				return (1);
222790643Sbenno			}
222890643Sbenno
222990643Sbenno			source_pvo = pvo;
223090643Sbenno
223190643Sbenno			if (victim_pvo != NULL)
223290643Sbenno				break;
223390643Sbenno		}
223490643Sbenno
223578880Sbenno		/*
223690643Sbenno		 * We also need the pvo entry of the victim we are replacing
223790643Sbenno		 * so save the R & C bits of the PTE.
223878880Sbenno		 */
223990643Sbenno		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2240183290Snwhitehorn		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
224190643Sbenno			victim_pvo = pvo;
224290643Sbenno			if (source_pvo != NULL)
224390643Sbenno				break;
224490643Sbenno		}
224590643Sbenno	}
224678880Sbenno
2247134535Salc	if (source_pvo == NULL) {
2248152180Sgrehan		mtx_unlock(&moea_table_mutex);
224990643Sbenno		return (0);
2250134535Salc	}
225190643Sbenno
225290643Sbenno	if (victim_pvo == NULL) {
225390643Sbenno		if ((pt->pte_hi & PTE_HID) == 0)
2254152180Sgrehan			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
225590643Sbenno			    "entry", pt);
225690643Sbenno
225778880Sbenno		/*
225890643Sbenno		 * If this is a secondary PTE, we need to search its primary
225990643Sbenno		 * pvo bucket for the matching PVO.
226078880Sbenno		 */
2261152180Sgrehan		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
226290643Sbenno		    pvo_olink) {
226390643Sbenno			/*
226490643Sbenno			 * We also need the pvo entry of the victim we are
226590643Sbenno			 * replacing so save the R & C bits of the PTE.
226690643Sbenno			 */
2267183290Snwhitehorn			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
226890643Sbenno				victim_pvo = pvo;
226990643Sbenno				break;
227090643Sbenno			}
227190643Sbenno		}
227278880Sbenno
227390643Sbenno		if (victim_pvo == NULL)
2274152180Sgrehan			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
227590643Sbenno			    "entry", pt);
227690643Sbenno	}
227778880Sbenno
227890643Sbenno	/*
227990643Sbenno	 * We are invalidating the TLB entry for the EA we are replacing even
228090643Sbenno	 * though it's valid.  If we don't, we lose any ref/chg bit changes
228190643Sbenno	 * contained in the TLB entry.
228290643Sbenno	 */
2283183290Snwhitehorn	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;
228478880Sbenno
2285183290Snwhitehorn	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
2286183290Snwhitehorn	moea_pte_set(pt, &source_pvo->pvo_pte.pte);
228790643Sbenno
228890643Sbenno	PVO_PTEGIDX_CLR(victim_pvo);
228990643Sbenno	PVO_PTEGIDX_SET(source_pvo, i);
2290152180Sgrehan	moea_pte_replacements++;
229190643Sbenno
2292152180Sgrehan	mtx_unlock(&moea_table_mutex);
229390643Sbenno	return (1);
229490643Sbenno}
229590643Sbenno
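/*
 * Select a victim PVO in the given PTEG whose PTE may be evicted: wired
 * and non-resident mappings are skipped, and an entry whose referenced
 * bit is clear is preferred over a recently used one.
 */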
2296253976Sjhibbitsstatic __inline struct pvo_entry *
2297253976Sjhibbitsmoea_pte_spillable_ident(u_int ptegidx)
2298253976Sjhibbits{
2299253976Sjhibbits	struct	pte *pt;
2300253976Sjhibbits	struct	pvo_entry *pvo_walk, *pvo = NULL;
2301253976Sjhibbits
2302253976Sjhibbits	LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
2303253976Sjhibbits		if (pvo_walk->pvo_vaddr & PVO_WIRED)
2304253976Sjhibbits			continue;
2305253976Sjhibbits
2306253976Sjhibbits		if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
2307253976Sjhibbits			continue;
2308253976Sjhibbits
2309253976Sjhibbits		pt = moea_pvo_to_pte(pvo_walk, -1);
2310253976Sjhibbits
2311253976Sjhibbits		if (pt == NULL)
2312253976Sjhibbits			continue;
2313253976Sjhibbits
2314253976Sjhibbits		pvo = pvo_walk;
2315253976Sjhibbits
2316253976Sjhibbits		mtx_unlock(&moea_table_mutex);
2317253976Sjhibbits		if (!(pt->pte_lo & PTE_REF))
2318253976Sjhibbits			return (pvo_walk);
2319253976Sjhibbits	}
2320253976Sjhibbits
2321253976Sjhibbits	return (pvo);
2322253976Sjhibbits}
2323253976Sjhibbits
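/*
 * Insert a PTE into the hardware page table: first try for a free slot
 * in the primary PTEG, then in the secondary PTEG; if both are full,
 * evict a spillable entry to make room.  Returns the slot index that
 * was used.
 */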
232490643Sbennostatic int
2325152180Sgrehanmoea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
232690643Sbenno{
232790643Sbenno	struct	pte *pt;
2328253976Sjhibbits	struct	pvo_entry *victim_pvo;
232990643Sbenno	int	i;
2330253976Sjhibbits	int	victim_idx;
2331253976Sjhibbits	u_int	pteg_bkpidx = ptegidx;
233290643Sbenno
2333159928Salc	mtx_assert(&moea_table_mutex, MA_OWNED);
2334159928Salc
233590643Sbenno	/*
233690643Sbenno	 * First try primary hash.
233790643Sbenno	 */
2338152180Sgrehan	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
233990643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
234090643Sbenno			pvo_pt->pte_hi &= ~PTE_HID;
2341152180Sgrehan			moea_pte_set(pt, pvo_pt);
234290643Sbenno			return (i);
234378880Sbenno		}
234490643Sbenno	}
234578880Sbenno
234690643Sbenno	/*
234790643Sbenno	 * Now try secondary hash.
234890643Sbenno	 */
2349152180Sgrehan	ptegidx ^= moea_pteg_mask;
2350165362Sgrehan
2351152180Sgrehan	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
235290643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
235390643Sbenno			pvo_pt->pte_hi |= PTE_HID;
2354152180Sgrehan			moea_pte_set(pt, pvo_pt);
235590643Sbenno			return (i);
235690643Sbenno		}
235790643Sbenno	}
235878880Sbenno
2359253976Sjhibbits	/* Try again, but this time try to force a PTE out. */
2360253976Sjhibbits	ptegidx = pteg_bkpidx;
2361253976Sjhibbits
2362253976Sjhibbits	victim_pvo = moea_pte_spillable_ident(ptegidx);
2363253976Sjhibbits	if (victim_pvo == NULL) {
2364253976Sjhibbits		ptegidx ^= moea_pteg_mask;
2365253976Sjhibbits		victim_pvo = moea_pte_spillable_ident(ptegidx);
2366253976Sjhibbits	}
2367253976Sjhibbits
2368253976Sjhibbits	if (victim_pvo == NULL) {
2369253976Sjhibbits		panic("moea_pte_insert: overflow");
2370253976Sjhibbits		return (-1);
2371253976Sjhibbits	}
2372253976Sjhibbits
2373253976Sjhibbits	victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);
2374253976Sjhibbits
2375253976Sjhibbits	if (pteg_bkpidx == ptegidx)
2376253976Sjhibbits		pvo_pt->pte_hi &= ~PTE_HID;
2377253976Sjhibbits	else
2378253976Sjhibbits		pvo_pt->pte_hi |= PTE_HID;
2379253976Sjhibbits
2380253976Sjhibbits	/*
2381253976Sjhibbits	 * Synchronize the sacrifice PTE with its PVO, then mark both
2382253976Sjhibbits	 * invalid. The PVO will be reused when/if the VM system comes
2383253976Sjhibbits	 * here after a fault.
2384253976Sjhibbits	 */
2385253976Sjhibbits	pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];
2386253976Sjhibbits
2387253976Sjhibbits	if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
2388253976Sjhibbits		panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x",
		    victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
2389253976Sjhibbits
2390253976Sjhibbits	/*
2391253976Sjhibbits	 * Set the new PTE.
2392253976Sjhibbits	 */
2393253976Sjhibbits	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
2394253976Sjhibbits	PVO_PTEGIDX_CLR(victim_pvo);
2395253976Sjhibbits	moea_pte_overflow++;
2396253976Sjhibbits	moea_pte_set(pt, pvo_pt);
2397253976Sjhibbits
2398253976Sjhibbits	return (victim_idx & 7);
239978880Sbenno}
240084921Sbenno
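/*
 * Return TRUE if any mapping of the given page has the given attribute
 * bit set, checking the cached attributes before falling back to the
 * PTEs themselves.
 */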
240190643Sbennostatic boolean_t
2402152180Sgrehanmoea_query_bit(vm_page_t m, int ptebit)
240384921Sbenno{
240490643Sbenno	struct	pvo_entry *pvo;
240590643Sbenno	struct	pte *pt;
240684921Sbenno
2407238357Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2408152180Sgrehan	if (moea_attr_fetch(m) & ptebit)
240990643Sbenno		return (TRUE);
241084921Sbenno
241190643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
241284921Sbenno
241390643Sbenno		/*
241490643Sbenno		 * See if we saved the bit off.  If so, cache it and return
241590643Sbenno		 * success.
241690643Sbenno		 */
2417183290Snwhitehorn		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2418152180Sgrehan			moea_attr_save(m, ptebit);
241990643Sbenno			return (TRUE);
242090643Sbenno		}
242190643Sbenno	}
242284921Sbenno
242390643Sbenno	/*
242490643Sbenno	 * No luck, now go through the hard part of looking at the PTEs
242590643Sbenno	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
242690643Sbenno	 * the PTEs.
242790643Sbenno	 */
2428183094Smarcel	powerpc_sync();
242990643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
243090643Sbenno
243190643Sbenno		/*
243290643Sbenno		 * See if this pvo has a valid PTE.  if so, fetch the
243390643Sbenno		 * REF/CHG bits from the valid PTE.  If the appropriate
243490643Sbenno		 * ptebit is set, cache it and return success.
243590643Sbenno		 */
2436152180Sgrehan		pt = moea_pvo_to_pte(pvo, -1);
243790643Sbenno		if (pt != NULL) {
2438183290Snwhitehorn			moea_pte_synch(pt, &pvo->pvo_pte.pte);
2439159928Salc			mtx_unlock(&moea_table_mutex);
2440183290Snwhitehorn			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2441152180Sgrehan				moea_attr_save(m, ptebit);
244290643Sbenno				return (TRUE);
244390643Sbenno			}
244490643Sbenno		}
244584921Sbenno	}
244684921Sbenno
2447123354Sgallatin	return (FALSE);
244884921Sbenno}
244990643Sbenno
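/*
 * Clear the given attribute bit in the cached attributes and in every
 * mapping of the given page; returns the number of mappings in which
 * the bit was set.
 */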
2450110172Sgrehanstatic u_int
2451208990Salcmoea_clear_bit(vm_page_t m, int ptebit)
245290643Sbenno{
2453110172Sgrehan	u_int	count;
245490643Sbenno	struct	pvo_entry *pvo;
245590643Sbenno	struct	pte *pt;
245690643Sbenno
2457238357Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2458208990Salc
245990643Sbenno	/*
246090643Sbenno	 * Clear the cached value.
246190643Sbenno	 */
2462152180Sgrehan	moea_attr_clear(m, ptebit);
246390643Sbenno
246490643Sbenno	/*
246590643Sbenno	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
246690643Sbenno	 * we can reset the right ones).  note that since the pvo entries and
246790643Sbenno	 * list heads are accessed via BAT0 and are never placed in the page
246890643Sbenno	 * table, we don't have to worry about further accesses setting the
246990643Sbenno	 * REF/CHG bits.
247090643Sbenno	 */
2471183094Smarcel	powerpc_sync();
247290643Sbenno
247390643Sbenno	/*
247490643Sbenno	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
247590643Sbenno	 * valid pte clear the ptebit from the valid pte.
247690643Sbenno	 */
2477110172Sgrehan	count = 0;
247890643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2479152180Sgrehan		pt = moea_pvo_to_pte(pvo, -1);
248090643Sbenno		if (pt != NULL) {
2481183290Snwhitehorn			moea_pte_synch(pt, &pvo->pvo_pte.pte);
2482183290Snwhitehorn			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2483110172Sgrehan				count++;
2484152180Sgrehan				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2485110172Sgrehan			}
2486159928Salc			mtx_unlock(&moea_table_mutex);
248790643Sbenno		}
2488183290Snwhitehorn		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
248990643Sbenno	}
249090643Sbenno
2491110172Sgrehan	return (count);
249290643Sbenno}
249399038Sbenno
249499038Sbenno/*
2495103604Sgrehan * Return true if the physical range is encompassed by the battable[idx]
2496103604Sgrehan */
2497103604Sgrehanstatic int
2498152180Sgrehanmoea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2499103604Sgrehan{
2500103604Sgrehan	u_int prot;
2501103604Sgrehan	u_int32_t start;
2502103604Sgrehan	u_int32_t end;
2503103604Sgrehan	u_int32_t bat_ble;
2504103604Sgrehan
2505103604Sgrehan	/*
2506103604Sgrehan	 * Return immediately if not a valid mapping
2507103604Sgrehan	 */
2508214601Snwhitehorn	if (!(battable[idx].batu & BAT_Vs))
2509103604Sgrehan		return (EINVAL);
2510103604Sgrehan
2511103604Sgrehan	/*
2512103604Sgrehan	 * The BAT entry must be cache-inhibited, guarded, and r/w
2513103604Sgrehan	 * so it can function as an i/o page
2514103604Sgrehan	 */
2515103604Sgrehan	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2516103604Sgrehan	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2517103604Sgrehan		return (EPERM);
2518103604Sgrehan
2519103604Sgrehan	/*
2520103604Sgrehan	 * The address should be within the BAT range. Assume that the
2521103604Sgrehan	 * start address in the BAT has the correct alignment (thus
2522103604Sgrehan	 * not requiring masking)
2523103604Sgrehan	 */
2524103604Sgrehan	start = battable[idx].batl & BAT_PBS;
2525103604Sgrehan	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2526103604Sgrehan	end = start | (bat_ble << 15) | 0x7fff;
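	/*
	 * For illustration (assuming the usual 32-bit OEA BAT layout,
	 * with the BL field in batu bits 2-12): a 256MB BAT entry has
	 * BL = 0x7ff, so bat_ble = 0x1ffc | 0x03 = 0x1fff and
	 * end = start | (0x1fff << 15) | 0x7fff = start | 0x0fffffff.
	 */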
2527103604Sgrehan
2528103604Sgrehan	if ((pa < start) || ((pa + size) > end))
2529103604Sgrehan		return (ERANGE);
2530103604Sgrehan
2531103604Sgrehan	return (0);
2532103604Sgrehan}
2533103604Sgrehan
2534152180Sgrehanboolean_t
2535235936Srajmoea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2536133855Sssouhlal{
2537133855Sssouhlal	int i;
2538103604Sgrehan
2539133855Sssouhlal	/*
2540133855Sssouhlal	 * This currently does not work for entries that
2541133855Sssouhlal	 * overlap 256M BAT segments.
2542133855Sssouhlal	 */
2543133855Sssouhlal
2544133855Sssouhlal	for (i = 0; i < 16; i++)
2545152180Sgrehan		if (moea_bat_mapped(i, pa, size) == 0)
2546133855Sssouhlal			return (0);
2547133855Sssouhlal
2548133855Sssouhlal	return (EFAULT);
2549133855Sssouhlal}
2550133855Sssouhlal
2551103604Sgrehan/*
255299038Sbenno * Map a set of physical memory pages into the kernel virtual
255399038Sbenno * address space. Return a pointer to where it is mapped. This
255499038Sbenno * routine is intended to be used for mapping device memory,
255599038Sbenno * NOT real memory.
255699038Sbenno */
255799038Sbennovoid *
2558235936Srajmoea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
255999038Sbenno{
2560213307Snwhitehorn
2561213307Snwhitehorn	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2562213307Snwhitehorn}
2563213307Snwhitehorn
2564213307Snwhitehornvoid *
2565213307Snwhitehornmoea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2566213307Snwhitehorn{
2567103604Sgrehan	vm_offset_t va, tmpva, ppa, offset;
2568103604Sgrehan	int i;
2569103604Sgrehan
2570103604Sgrehan	ppa = trunc_page(pa);
257199038Sbenno	offset = pa & PAGE_MASK;
257299038Sbenno	size = roundup(offset + size, PAGE_SIZE);
257399038Sbenno
2574103604Sgrehan	/*
2575103604Sgrehan	 * If the physical address lies within a valid BAT table entry,
2576103604Sgrehan	 * return the 1:1 mapping. This currently doesn't work
2577103604Sgrehan	 * for regions that overlap 256M BAT segments.
2578103604Sgrehan	 */
2579103604Sgrehan	for (i = 0; i < 16; i++) {
2580152180Sgrehan		if (moea_bat_mapped(i, pa, size) == 0)
2581103604Sgrehan			return ((void *) pa);
2582103604Sgrehan	}
2583103604Sgrehan
2584254025Sjeff	va = kva_alloc(size);
258599038Sbenno	if (!va)
2586152180Sgrehan		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
258799038Sbenno
258899038Sbenno	for (tmpva = va; size > 0;) {
2589213307Snwhitehorn		moea_kenter_attr(mmu, tmpva, ppa, ma);
2590183094Smarcel		tlbie(tmpva);
259199038Sbenno		size -= PAGE_SIZE;
259299038Sbenno		tmpva += PAGE_SIZE;
2593103604Sgrehan		ppa += PAGE_SIZE;
259499038Sbenno	}
259599038Sbenno
259699038Sbenno	return ((void *)(va + offset));
259799038Sbenno}
259899038Sbenno
259999038Sbennovoid
2600152180Sgrehanmoea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
260199038Sbenno{
260299038Sbenno	vm_offset_t base, offset;
260399038Sbenno
2604103604Sgrehan	/*
2605103604Sgrehan	 * If this is outside kernel virtual space, then it's a
2606103604Sgrehan	 * battable entry and doesn't require unmapping
2607103604Sgrehan	 */
2608204128Snwhitehorn	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
2609103604Sgrehan		base = trunc_page(va);
2610103604Sgrehan		offset = va & PAGE_MASK;
2611103604Sgrehan		size = roundup(offset + size, PAGE_SIZE);
2612254025Sjeff		kva_free(base, size);
2613103604Sgrehan	}
261499038Sbenno}
2615198341Smarcel
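/*
 * Make the instruction cache coherent with recent stores to the given
 * range of the given pmap, one mapped page at a time.
 */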
2616198341Smarcelstatic void
2617198341Smarcelmoea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2618198341Smarcel{
2619198341Smarcel	struct pvo_entry *pvo;
2620198341Smarcel	vm_offset_t lim;
2621198341Smarcel	vm_paddr_t pa;
2622198341Smarcel	vm_size_t len;
2623198341Smarcel
2624198341Smarcel	PMAP_LOCK(pm);
2625198341Smarcel	while (sz > 0) {
2626198341Smarcel		lim = round_page(va);
2627198341Smarcel		len = MIN(lim - va, sz);
2628198341Smarcel		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2629198341Smarcel		if (pvo != NULL) {
2630198341Smarcel			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
2631198341Smarcel			    (va & ADDR_POFF);
2632198341Smarcel			moea_syncicache(pa, len);
2633198341Smarcel		}
2634198341Smarcel		va += len;
2635198341Smarcel		sz -= len;
2636198341Smarcel	}
2637198341Smarcel	PMAP_UNLOCK(pm);
2638198341Smarcel}
2639249864Sjhibbits
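/*
 * Translate an offset within a dump chunk to the address the dump code
 * should use: a virtual address for minidump chunks, a physical address
 * otherwise.
 */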
2640249864Sjhibbitsvm_offset_t
2641249864Sjhibbitsmoea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2642249864Sjhibbits    vm_size_t *sz)
2643249864Sjhibbits{
2644249864Sjhibbits	if (md->md_vaddr == ~0UL)
2645249864Sjhibbits		return (md->md_paddr + ofs);
2646249864Sjhibbits	else
2647249864Sjhibbits		return (md->md_vaddr + ofs);
2648249864Sjhibbits}
2649249864Sjhibbits
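/*
 * Enumerate the chunks of memory to include in a crash dump: for
 * minidumps, the kernel's data and bss, the message buffer, and the
 * mapped portions of kernel VM; for full dumps, the physical memory
 * regions.
 */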
2650249864Sjhibbitsstruct pmap_md *
2651249864Sjhibbitsmoea_scan_md(mmu_t mmu, struct pmap_md *prev)
2652249864Sjhibbits{
2653249864Sjhibbits	static struct pmap_md md;
2654249864Sjhibbits	struct pvo_entry *pvo;
2655249864Sjhibbits	vm_offset_t va;
2656249864Sjhibbits
2657249864Sjhibbits	if (dumpsys_minidump) {
2658249864Sjhibbits		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2659249864Sjhibbits		if (prev == NULL) {
2660249864Sjhibbits			/* 1st: kernel .data and .bss. */
2661249864Sjhibbits			md.md_index = 1;
2662249864Sjhibbits			md.md_vaddr = trunc_page((uintptr_t)_etext);
2663249864Sjhibbits			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2664249864Sjhibbits			return (&md);
2665249864Sjhibbits		}
2666249864Sjhibbits		switch (prev->md_index) {
2667249864Sjhibbits		case 1:
2668249864Sjhibbits			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2669249864Sjhibbits			md.md_index = 2;
2670249864Sjhibbits			md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
2671249864Sjhibbits			md.md_size = round_page(msgbufp->msg_size);
2672249864Sjhibbits			break;
2673249864Sjhibbits		case 2:
2674249864Sjhibbits			/* 3rd: kernel VM. */
2675249864Sjhibbits			va = prev->md_vaddr + prev->md_size;
2676249864Sjhibbits			/* Find start of next chunk (from va). */
2677249864Sjhibbits			while (va < virtual_end) {
2678249864Sjhibbits				/* Don't dump the buffer cache. */
2679249864Sjhibbits				if (va >= kmi.buffer_sva &&
2680249864Sjhibbits				    va < kmi.buffer_eva) {
2681249864Sjhibbits					va = kmi.buffer_eva;
2682249864Sjhibbits					continue;
2683249864Sjhibbits				}
2684249864Sjhibbits				pvo = moea_pvo_find_va(kernel_pmap,
2685249864Sjhibbits				    va & ~ADDR_POFF, NULL);
2686249864Sjhibbits				if (pvo != NULL &&
2687249864Sjhibbits				    (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
2688249864Sjhibbits					break;
2689249864Sjhibbits				va += PAGE_SIZE;
2690249864Sjhibbits			}
2691249864Sjhibbits			if (va < virtual_end) {
2692249864Sjhibbits				md.md_vaddr = va;
2693249864Sjhibbits				va += PAGE_SIZE;
2694249864Sjhibbits				/* Find last page in chunk. */
2695249864Sjhibbits				while (va < virtual_end) {
2696249864Sjhibbits					/* Don't run into the buffer cache. */
2697249864Sjhibbits					if (va == kmi.buffer_sva)
2698249864Sjhibbits						break;
2699249864Sjhibbits					pvo = moea_pvo_find_va(kernel_pmap,
2700249864Sjhibbits					    va & ~ADDR_POFF, NULL);
2701249864Sjhibbits					if (pvo == NULL ||
2702249864Sjhibbits					    !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
2703249864Sjhibbits						break;
2704249864Sjhibbits					va += PAGE_SIZE;
2705249864Sjhibbits				}
2706249864Sjhibbits				md.md_size = va - md.md_vaddr;
2707249864Sjhibbits				break;
2708249864Sjhibbits			}
2709249864Sjhibbits			md.md_index = 3;
2710249864Sjhibbits			/* FALLTHROUGH */
2711249864Sjhibbits		default:
2712249864Sjhibbits			return (NULL);
2713249864Sjhibbits		}
2714249864Sjhibbits	} else { /* full dumps */
2715249864Sjhibbits		mem_regions(&pregions, &pregions_sz,
2716249864Sjhibbits		    &regions, &regions_sz);
2717249864Sjhibbits
2718249864Sjhibbits		if (prev == NULL) {
2719249864Sjhibbits			/* first physical chunk. */
2720249864Sjhibbits			md.md_paddr = pregions[0].mr_start;
2721249864Sjhibbits			md.md_size = pregions[0].mr_size;
2722249864Sjhibbits			md.md_vaddr = ~0UL;
2723249864Sjhibbits			md.md_index = 1;
2724249864Sjhibbits		} else if (md.md_index < pregions_sz) {
2725249864Sjhibbits			md.md_paddr = pregions[md.md_index].mr_start;
2726249864Sjhibbits			md.md_size = pregions[md.md_index].mr_size;
2727249864Sjhibbits			md.md_vaddr = ~0UL;
2728249864Sjhibbits			md.md_index++;
2729249864Sjhibbits		} else {
2730249864Sjhibbits			/* There's no next physical chunk. */
2731249864Sjhibbits			return (NULL);
2732249864Sjhibbits		}
2733249864Sjhibbits	}
2734249864Sjhibbits
2735249864Sjhibbits	return (&md);
2736249864Sjhibbits}
2737