/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/booke/pmap.c 265954 2014-05-13 16:59:50Z ian $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t bp_ntlb1s;
#endif

vm_paddr_t ccsrbar_pa;
vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};
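
/*
 * Sizing note (illustrative; assumes PTBL_PAGES = 2 with 4 KB pages, as
 * defined in pte.h): each ptbl_buf describes one ptbl worth of KVA, i.e.
 * PTBL_PAGES * PAGE_SIZE = 8 KB, so the PTBL_BUFS (2048) bufs together
 * cover the 16 MB "reserved for ptbl bufs" window in the VM layout above.
 */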

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

/* Read TLB0 geometry (entry count and associativity) from hardware. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
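
/*
 * Worked example (illustrative; the exact geometry is probed above, not
 * hardcoded): on an e500v2 core TLB0 is a 512-entry, 4-way set-associative
 * array, so this would yield tlb0_entries = 512, tlb0_ways = 4 and
 * tlb0_entries_per_way = 128. tlb0_tableidx() then combines an entry's
 * EPN bits with the way number to address one of these slots. The numbers
 * vary between e500 revisions, which is why they are read from h/w here.
 */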

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return a ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf backing the
 * given ptbl and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
static pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 * Also note that "start - 1" is deliberate. With SMP, the
	 * entry point is exactly a page from the actual load address.
	 * As such, trunc_page() has no effect and we're off by a page.
	 * Since we always have the ELF header between the load address
	 * and the entry point, we can safely subtract 1 to compensate.
	 */
	kernstart = trunc_page(start - 1);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
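
	/*
	 * Example of the sizing above (illustrative numbers; the real
	 * constants come from vmparam.h/pte.h): with a 1 GB kernel VA
	 * window (0xc000_0000 - 0xffff_ffff) and PDIR_SIZE = 4 MB this
	 * gives kernel_ptbls = 256, and with PTBL_PAGES = 2 it reserves
	 * 512 pages (2 MB) of kernel PTEs here.
	 */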

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions.  Non-page aligned memory isn't very interesting
	 * to us.  Also, sort the entries for ascending addresses.
	 */
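
	/*
	 * Worked example for the loop below (illustrative numbers): an
	 * avail region 0x0000_0000 - 0x2000_0000 with the kernel loaded
	 * at 0x0100_0000 - 0x0140_0000 is split into two regions,
	 * 0x0000_0000 - 0x0100_0000 and 0x0140_0000 - 0x2000_0000, which
	 * are then page aligned and insertion sorted.
	 */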

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
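
	/*
	 * For example, setting hw.physmem=512m at the loader prompt makes
	 * hwphyssz = 0x2000_0000 here, and the loop below stops filling
	 * phys_avail[] once that much memory has been accounted for.
	 */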

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
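
	/*
	 * Illustrative sizing (assumed example values): with the default
	 * PMAP_SHPGPERPROC = 200, maxproc = 1000 and 64K physical pages
	 * (256 MB of RAM), pv_entry_max = 200 * 1000 + 65536 = 265536 and
	 * pv_entry_high_water = 238977, beyond which pv_alloc() starts
	 * waking up the pagedaemon.
	 */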

	uma_zone_reserve_kva(pvzone, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}
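
/*
 * Usage sketch (illustrative): callers bracket a temporary KVA window
 * with a qenter/teardown pair, e.g. ptbl_alloc() above does
 *
 *	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
 *
 * and ptbl_free() later unmaps the same window one page at a time with
 * mmu_booke_kremove() (mmu_booke_qremove() below does it for a range).
 */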

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
	//		(u_int32_t)pmap, su, pmap->pm_tid,
	//		(u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, this must be a protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}
}
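
/*
 * For reference, the new-mapping path above composes flags as follows
 * (sketch): a writable, executable user mapping of a managed page ends up
 * with PTE_VALID | PTE_M | PTE_SR | PTE_UR | PTE_SW | PTE_UW | PTE_SX |
 * PTE_UX (pte_enter() adds PTE_MANAGED), plus PTE_WIRED when wired;
 * kernel mappings get only the supervisor PTE_S* variants.
 */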
1695
1696/*
1697 * Maps a sequence of resident pages belonging to the same object.
1698 * The sequence begins with the given page m_start.  This page is
1699 * mapped at the given virtual address start.  Each subsequent page is
1700 * mapped at a virtual address that is offset from start by the same
1701 * amount as the page is offset from m_start within the object.  The
1702 * last page in the sequence is the page with the largest offset from
1703 * m_start that can be mapped at a virtual address less than the given
1704 * virtual address end.  Not every virtual page between start and end
1705 * is mapped; only those for which a resident page exists with the
1706 * corresponding offset from m_start are mapped.
1707 */
1708static void
1709mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1710    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1711{
1712	vm_page_t m;
1713	vm_pindex_t diff, psize;
1714
1715	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1716
1717	psize = atop(end - start);
1718	m = m_start;
1719	rw_wlock(&pvh_global_lock);
1720	PMAP_LOCK(pmap);
1721	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1722		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1723		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1724		m = TAILQ_NEXT(m, listq);
1725	}
1726	rw_wunlock(&pvh_global_lock);
1727	PMAP_UNLOCK(pmap);
1728}
1729
1730static void
1731mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1732    vm_prot_t prot)
1733{
1734
1735	rw_wlock(&pvh_global_lock);
1736	PMAP_LOCK(pmap);
1737	mmu_booke_enter_locked(mmu, pmap, va, m,
1738	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1739	rw_wunlock(&pvh_global_lock);
1740	PMAP_UNLOCK(pmap);
1741}
1742
1743/*
1744 * Remove the given range of addresses from the specified map.
1745 *
1746 * It is assumed that the start and end are properly rounded to the page size.
1747 */
1748static void
1749mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1750{
1751	pte_t *pte;
1752	uint8_t hold_flag;
1753
1754	int su = (pmap == kernel_pmap);
1755
1756	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1757	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1758
1759	if (su) {
1760		KASSERT(((va >= virtual_avail) &&
1761		    (va <= VM_MAX_KERNEL_ADDRESS)),
1762		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1763	} else {
1764		KASSERT((va <= VM_MAXUSER_ADDRESS),
1765		    ("mmu_booke_remove: user pmap, non user va"));
1766	}
1767
1768	if (PMAP_REMOVE_DONE(pmap)) {
1769		//debugf("mmu_booke_remove: e (empty)\n");
1770		return;
1771	}
1772
1773	hold_flag = PTBL_HOLD_FLAG(pmap);
1774	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1775
1776	rw_wlock(&pvh_global_lock);
1777	PMAP_LOCK(pmap);
1778	for (; va < endva; va += PAGE_SIZE) {
1779		pte = pte_find(mmu, pmap, va);
1780		if ((pte != NULL) && PTE_ISVALID(pte))
1781			pte_remove(mmu, pmap, va, hold_flag);
1782	}
1783	PMAP_UNLOCK(pmap);
1784	rw_wunlock(&pvh_global_lock);
1785
1786	//debugf("mmu_booke_remove: e\n");
1787}
1788
1789/*
1790 * Remove physical page from all pmaps in which it resides.
1791 */
1792static void
1793mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1794{
1795	pv_entry_t pv, pvn;
1796	uint8_t hold_flag;
1797
1798	rw_wlock(&pvh_global_lock);
1799	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1800		pvn = TAILQ_NEXT(pv, pv_link);
1801
1802		PMAP_LOCK(pv->pv_pmap);
1803		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1804		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1805		PMAP_UNLOCK(pv->pv_pmap);
1806	}
1807	vm_page_aflag_clear(m, PGA_WRITEABLE);
1808	rw_wunlock(&pvh_global_lock);
1809}
1810
1811/*
1812 * Map a range of physical addresses into kernel virtual address space.
1813 */
1814static vm_offset_t
1815mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1816    vm_paddr_t pa_end, int prot)
1817{
1818	vm_offset_t sva = *virt;
1819	vm_offset_t va = sva;
1820
1821	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1822	//		sva, pa_start, pa_end);
1823
1824	while (pa_start < pa_end) {
1825		mmu_booke_kenter(mmu, va, pa_start);
1826		va += PAGE_SIZE;
1827		pa_start += PAGE_SIZE;
1828	}
1829	*virt = va;
1830
1831	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1832	return (sva);
1833}
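/*
 * Usage sketch (values illustrative only): with *virt = 0xc2009000,
 * pa_start = 0x80000000 and pa_end = 0x80003000, three pages are
 * entered via mmu_booke_kenter(), *virt is advanced to 0xc200c000 and
 * the original 0xc2009000 is returned as the start of the mapping.
 */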
1834
1835/*
1836 * The pmap must be activated before its address space can be accessed in any
1837 * way.
1838 */
1839static void
1840mmu_booke_activate(mmu_t mmu, struct thread *td)
1841{
1842	pmap_t pmap;
1843	u_int cpuid;
1844
1845	pmap = &td->td_proc->p_vmspace->vm_pmap;
1846
1847	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1848	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1849
1850	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1851
1852	mtx_lock_spin(&sched_lock);
1853
1854	cpuid = PCPU_GET(cpuid);
1855	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1856	PCPU_SET(curpmap, pmap);
1857
1858	if (pmap->pm_tid[cpuid] == TID_NONE)
1859		tid_alloc(pmap);
1860
1861	/* Load PID0 register with pmap tid value. */
1862	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1863	__asm __volatile("isync");
1864
1865	mtx_unlock_spin(&sched_lock);
1866
1867	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1868	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1869}
1870
1871/*
1872 * Deactivate the specified process's address space.
1873 */
1874static void
1875mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1876{
1877	pmap_t pmap;
1878
1879	pmap = &td->td_proc->p_vmspace->vm_pmap;
1880
1881	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1882	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1883
1884	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
1885	PCPU_SET(curpmap, NULL);
1886}
1887
1888/*
1889 * Copy the range specified by src_addr/len
1890 * from the source map to the range dst_addr/len
1891 * in the destination map.
1892 *
1893 * This routine is only advisory and need not do anything.
1894 */
1895static void
1896mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1897    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1898{
1899
1900}
1901
1902/*
1903 * Set the physical protection on the specified range of this map as requested.
1904 */
1905static void
1906mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1907    vm_prot_t prot)
1908{
1909	vm_offset_t va;
1910	vm_page_t m;
1911	pte_t *pte;
1912
1913	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1914		mmu_booke_remove(mmu, pmap, sva, eva);
1915		return;
1916	}
1917
1918	if (prot & VM_PROT_WRITE)
1919		return;
1920
1921	PMAP_LOCK(pmap);
1922	for (va = sva; va < eva; va += PAGE_SIZE) {
1923		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1924			if (PTE_ISVALID(pte)) {
1925				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1926
1927				mtx_lock_spin(&tlbivax_mutex);
1928				tlb_miss_lock();
1929
1930				/* Handle modified pages. */
1931				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1932					vm_page_dirty(m);
1933
1934				tlb0_flush_entry(va);
1935				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1936
1937				tlb_miss_unlock();
1938				mtx_unlock_spin(&tlbivax_mutex);
1939			}
1940		}
1941	}
1942	PMAP_UNLOCK(pmap);
1943}
1944
1945/*
1946 * Clear the write and modified bits in each of the given page's mappings.
1947 */
1948static void
1949mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1950{
1951	pv_entry_t pv;
1952	pte_t *pte;
1953
1954	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1955	    ("mmu_booke_remove_write: page %p is not managed", m));
1956
1957	/*
1958	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1959	 * set by another thread while the object is locked.  Thus,
1960	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1961	 */
1962	VM_OBJECT_ASSERT_WLOCKED(m->object);
1963	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1964		return;
1965	rw_wlock(&pvh_global_lock);
1966	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1967		PMAP_LOCK(pv->pv_pmap);
1968		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1969			if (PTE_ISVALID(pte)) {
1970				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1971
1972				mtx_lock_spin(&tlbivax_mutex);
1973				tlb_miss_lock();
1974
1975				/* Handle modified pages. */
1976				if (PTE_ISMODIFIED(pte))
1977					vm_page_dirty(m);
1978
1979				/* Flush mapping from TLB0. */
				tlb0_flush_entry(pv->pv_va);
1980				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1981
1982				tlb_miss_unlock();
1983				mtx_unlock_spin(&tlbivax_mutex);
1984			}
1985		}
1986		PMAP_UNLOCK(pv->pv_pmap);
1987	}
1988	vm_page_aflag_clear(m, PGA_WRITEABLE);
1989	rw_wunlock(&pvh_global_lock);
1990}
1991
1992static void
1993mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
1994{
1995	pte_t *pte;
1996	pmap_t pmap;
1997	vm_page_t m;
1998	vm_offset_t addr;
1999	vm_paddr_t pa;
2000	int active, valid;
2001
2002	va = trunc_page(va);
2003	sz = round_page(sz);
2004
2005	rw_wlock(&pvh_global_lock);
2006	pmap = PCPU_GET(curpmap);
2007	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2008	while (sz > 0) {
2009		PMAP_LOCK(pm);
2010		pte = pte_find(mmu, pm, va);
2011		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2012		if (valid)
2013			pa = PTE_PA(pte);
2014		PMAP_UNLOCK(pm);
2015		if (valid) {
2016			if (!active) {
2017				/* Create a mapping in the active pmap. */
2018				addr = 0;
2019				m = PHYS_TO_VM_PAGE(pa);
2020				PMAP_LOCK(pmap);
2021				pte_enter(mmu, pmap, m, addr,
2022				    PTE_SR | PTE_VALID | PTE_UR);
2023				__syncicache((void *)addr, PAGE_SIZE);
2024				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2025				PMAP_UNLOCK(pmap);
2026			} else
2027				__syncicache((void *)va, PAGE_SIZE);
2028		}
2029		va += PAGE_SIZE;
2030		sz -= PAGE_SIZE;
2031	}
2032	rw_wunlock(&pvh_global_lock);
2033}
2034
2035/*
2036 * Atomically extract and hold the physical page with the given
2037 * pmap and virtual address pair if that mapping permits the given
2038 * protection.
2039 */
2040static vm_page_t
2041mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2042    vm_prot_t prot)
2043{
2044	pte_t *pte;
2045	vm_page_t m;
2046	uint32_t pte_wbit;
2047	vm_paddr_t pa;
2048
2049	m = NULL;
2050	pa = 0;
2051	PMAP_LOCK(pmap);
2052retry:
2053	pte = pte_find(mmu, pmap, va);
2054	if ((pte != NULL) && PTE_ISVALID(pte)) {
2055		if (pmap == kernel_pmap)
2056			pte_wbit = PTE_SW;
2057		else
2058			pte_wbit = PTE_UW;
2059
2060		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2061			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2062				goto retry;
2063			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2064			vm_page_hold(m);
2065		}
2066	}
2067
2068	PA_UNLOCK_COND(pa);
2069	PMAP_UNLOCK(pmap);
2070	return (m);
2071}
2072
2073/*
2074 * Initialize a vm_page's machine-dependent fields.
2075 */
2076static void
2077mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2078{
2079
2080	TAILQ_INIT(&m->md.pv_list);
2081}
2082
2083/*
2084 * mmu_booke_zero_page_area zeros the specified hardware page by
2085 * mapping it into virtual memory and using bzero to clear
2086 * its contents.
2087 *
2088 * off and size must reside within a single page.
2089 */
2090static void
2091mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2092{
2093	vm_offset_t va;
2094
2095	/* XXX KASSERT off and size are within a single page? */
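	/*
	 * A sketch of the assertion suggested above (hypothetical, not
	 * part of the original code):
	 *
	 *	KASSERT(off >= 0 && off + size <= PAGE_SIZE,
	 *	    ("mmu_booke_zero_page_area: off/size outside page"));
	 */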
2096
2097	mtx_lock(&zero_page_mutex);
2098	va = zero_page_va;
2099
2100	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2101	bzero((caddr_t)va + off, size);
2102	mmu_booke_kremove(mmu, va);
2103
2104	mtx_unlock(&zero_page_mutex);
2105}
2106
2107/*
2108 * mmu_booke_zero_page zeros the specified hardware page.
2109 */
2110static void
2111mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2112{
2113
2114	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2115}
2116
2117/*
2118 * mmu_booke_copy_page copies the specified (machine independent) page by
2119 * mapping the page into virtual memory and using memcpy to copy the page,
2120 * one machine dependent page at a time.
2121 */
2122static void
2123mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2124{
2125	vm_offset_t sva, dva;
2126
2127	sva = copy_page_src_va;
2128	dva = copy_page_dst_va;
2129
2130	mtx_lock(&copy_page_mutex);
2131	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2132	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2133	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2134	mmu_booke_kremove(mmu, dva);
2135	mmu_booke_kremove(mmu, sva);
2136	mtx_unlock(&copy_page_mutex);
2137}
2138
2139static inline void
2140mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2141    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2142{
2143	void *a_cp, *b_cp;
2144	vm_offset_t a_pg_offset, b_pg_offset;
2145	int cnt;
2146
2147	mtx_lock(&copy_page_mutex);
2148	while (xfersize > 0) {
2149		a_pg_offset = a_offset & PAGE_MASK;
2150		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2151		mmu_booke_kenter(mmu, copy_page_src_va,
2152		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2153		a_cp = (char *)copy_page_src_va + a_pg_offset;
2154		b_pg_offset = b_offset & PAGE_MASK;
2155		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2156		mmu_booke_kenter(mmu, copy_page_dst_va,
2157		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2158		b_cp = (char *)copy_page_dst_va + b_pg_offset;
2159		bcopy(a_cp, b_cp, cnt);
2160		mmu_booke_kremove(mmu, copy_page_dst_va);
2161		mmu_booke_kremove(mmu, copy_page_src_va);
2162		a_offset += cnt;
2163		b_offset += cnt;
2164		xfersize -= cnt;
2165	}
2166	mtx_unlock(&copy_page_mutex);
2167}
2168
2169/*
2170 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2171 * into virtual memory and using bzero to clear its contents. This is intended
2172 * to be called from the vm_pagezero process only and outside of Giant. No
2173 * lock is required.
2174 */
2175static void
2176mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2177{
2178	vm_offset_t va;
2179
2180	va = zero_page_idle_va;
2181	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2182	bzero((caddr_t)va, PAGE_SIZE);
2183	mmu_booke_kremove(mmu, va);
2184}
2185
2186/*
2187 * Return whether or not the specified physical page was modified
2188 * in any of the physical maps.
2189 */
2190static boolean_t
2191mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2192{
2193	pte_t *pte;
2194	pv_entry_t pv;
2195	boolean_t rv;
2196
2197	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2198	    ("mmu_booke_is_modified: page %p is not managed", m));
2199	rv = FALSE;
2200
2201	/*
2202	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2203	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2204	 * is clear, no PTEs can be modified.
2205	 */
2206	VM_OBJECT_ASSERT_WLOCKED(m->object);
2207	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2208		return (rv);
2209	rw_wlock(&pvh_global_lock);
2210	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2211		PMAP_LOCK(pv->pv_pmap);
2212		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2213		    PTE_ISVALID(pte)) {
2214			if (PTE_ISMODIFIED(pte))
2215				rv = TRUE;
2216		}
2217		PMAP_UNLOCK(pv->pv_pmap);
2218		if (rv)
2219			break;
2220	}
2221	rw_wunlock(&pvh_global_lock);
2222	return (rv);
2223}
2224
2225/*
2226 * Return whether or not the specified virtual address is eligible
2227 * for prefault.
2228 */
2229static boolean_t
2230mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2231{
2232
2233	return (FALSE);
2234}
2235
2236/*
2237 * Return whether or not the specified physical page was referenced
2238 * in any physical map.
2239 */
2240static boolean_t
2241mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2242{
2243	pte_t *pte;
2244	pv_entry_t pv;
2245	boolean_t rv;
2246
2247	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2248	    ("mmu_booke_is_referenced: page %p is not managed", m));
2249	rv = FALSE;
2250	rw_wlock(&pvh_global_lock);
2251	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2252		PMAP_LOCK(pv->pv_pmap);
2253		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2254		    PTE_ISVALID(pte)) {
2255			if (PTE_ISREFERENCED(pte))
2256				rv = TRUE;
2257		}
2258		PMAP_UNLOCK(pv->pv_pmap);
2259		if (rv)
2260			break;
2261	}
2262	rw_wunlock(&pvh_global_lock);
2263	return (rv);
2264}
2265
2266/*
2267 * Clear the modify bits on the specified physical page.
2268 */
2269static void
2270mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2271{
2272	pte_t *pte;
2273	pv_entry_t pv;
2274
2275	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2276	    ("mmu_booke_clear_modify: page %p is not managed", m));
2277	VM_OBJECT_ASSERT_WLOCKED(m->object);
2278	KASSERT(!vm_page_xbusied(m),
2279	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
2280
2281	/*
2282	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
2283	 * If the object containing the page is locked and the page is not
2284	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2285	 */
2286	if ((m->aflags & PGA_WRITEABLE) == 0)
2287		return;
2288	rw_wlock(&pvh_global_lock);
2289	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2290		PMAP_LOCK(pv->pv_pmap);
2291		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2292		    PTE_ISVALID(pte)) {
2293			mtx_lock_spin(&tlbivax_mutex);
2294			tlb_miss_lock();
2295
2296			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2297				tlb0_flush_entry(pv->pv_va);
2298				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2299				    PTE_REFERENCED);
2300			}
2301
2302			tlb_miss_unlock();
2303			mtx_unlock_spin(&tlbivax_mutex);
2304		}
2305		PMAP_UNLOCK(pv->pv_pmap);
2306	}
2307	rw_wunlock(&pvh_global_lock);
2308}
2309
2310/*
2311 * Return a count of reference bits for a page, clearing those bits.
2312 * It is not necessary for every reference bit to be cleared, but it
2313 * is necessary that 0 only be returned when there are truly no
2314 * reference bits set.
2315 *
2316 * XXX: The exact number of bits to check and clear is a matter that
2317 * should be tested and standardized at some point in the future for
2318 * optimal aging of shared pages.
2319 */
2320static int
2321mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2322{
2323	pte_t *pte;
2324	pv_entry_t pv;
2325	int count;
2326
2327	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2328	    ("mmu_booke_ts_referenced: page %p is not managed", m));
2329	count = 0;
2330	rw_wlock(&pvh_global_lock);
2331	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2332		PMAP_LOCK(pv->pv_pmap);
2333		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2334		    PTE_ISVALID(pte)) {
2335			if (PTE_ISREFERENCED(pte)) {
2336				mtx_lock_spin(&tlbivax_mutex);
2337				tlb_miss_lock();
2338
2339				tlb0_flush_entry(pv->pv_va);
2340				pte->flags &= ~PTE_REFERENCED;
2341
2342				tlb_miss_unlock();
2343				mtx_unlock_spin(&tlbivax_mutex);
2344
2345				if (++count > 4) {
2346					PMAP_UNLOCK(pv->pv_pmap);
2347					break;
2348				}
2349			}
2350		}
2351		PMAP_UNLOCK(pv->pv_pmap);
2352	}
2353	rw_wunlock(&pvh_global_lock);
2354	return (count);
2355}
2356
2357/*
2358 * Change wiring attribute for a map/virtual-address pair.
2359 */
2360static void
2361mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2362{
2363	pte_t *pte;
2364
2365	PMAP_LOCK(pmap);
2366	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2367		if (wired) {
2368			if (!PTE_ISWIRED(pte)) {
2369				pte->flags |= PTE_WIRED;
2370				pmap->pm_stats.wired_count++;
2371			}
2372		} else {
2373			if (PTE_ISWIRED(pte)) {
2374				pte->flags &= ~PTE_WIRED;
2375				pmap->pm_stats.wired_count--;
2376			}
2377		}
2378	}
2379	PMAP_UNLOCK(pmap);
2380}
2381
2382/*
2383 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2384 * page.  This count may be changed upwards or downwards in the future; it is
2385 * only necessary that true be returned for a small subset of pmaps for proper
2386 * page aging.
2387 */
2388static boolean_t
2389mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2390{
2391	pv_entry_t pv;
2392	int loops;
2393	boolean_t rv;
2394
2395	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2396	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
2397	loops = 0;
2398	rv = FALSE;
2399	rw_wlock(&pvh_global_lock);
2400	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2401		if (pv->pv_pmap == pmap) {
2402			rv = TRUE;
2403			break;
2404		}
2405		if (++loops >= 16)
2406			break;
2407	}
2408	rw_wunlock(&pvh_global_lock);
2409	return (rv);
2410}
2411
2412/*
2413 * Return the number of managed mappings to the given physical page that are
2414 * wired.
2415 */
2416static int
2417mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2418{
2419	pv_entry_t pv;
2420	pte_t *pte;
2421	int count = 0;
2422
2423	if ((m->oflags & VPO_UNMANAGED) != 0)
2424		return (count);
2425	rw_wlock(&pvh_global_lock);
2426	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2427		PMAP_LOCK(pv->pv_pmap);
2428		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2429			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2430				count++;
2431		PMAP_UNLOCK(pv->pv_pmap);
2432	}
2433	rw_wunlock(&pvh_global_lock);
2434	return (count);
2435}
2436
2437static int
2438mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2439{
2440	int i;
2441	vm_offset_t va;
2442
2443	/*
2444	 * This currently does not work for ranges that
2445	 * span multiple TLB1 entries.
2446	 */
2447	for (i = 0; i < tlb1_idx; i++) {
2448		if (tlb1_iomapped(i, pa, size, &va) == 0)
2449			return (0);
2450	}
2451
2452	return (EFAULT);
2453}
2454
2455vm_offset_t
2456mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2457    vm_size_t *sz)
2458{
2459	vm_paddr_t pa, ppa;
2460	vm_offset_t va;
2461	vm_size_t gran;
2462
2463	/* Raw physical memory dumps don't have a virtual address. */
2464	if (md->md_vaddr == ~0UL) {
2465		/* We always map a 256MB page at 256M. */
2466		gran = 256 * 1024 * 1024;
2467		pa = md->md_paddr + ofs;
2468		ppa = pa & ~(gran - 1);
2469		ofs = pa - ppa;
2470		va = gran;
2471		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2472		if (*sz > (gran - ofs))
2473			*sz = gran - ofs;
2474		return (va + ofs);
2475	}
2476
2477	/* Minidumps are based on virtual memory addresses. */
2478	va = md->md_vaddr + ofs;
2479	if (va >= kernstart + kernsize) {
2480		gran = PAGE_SIZE - (va & PAGE_MASK);
2481		if (*sz > gran)
2482			*sz = gran;
2483	}
2484	return (va);
2485}
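/*
 * Worked example for the raw-dump window above (addresses illustrative
 * only): with md_paddr = 0x20000000 and ofs = 0x01234567, pa is
 * 0x21234567 and ppa = pa & ~(256MB - 1) = 0x20000000.  A single 256MB
 * TLB1 entry then maps VA 0x10000000 to PA 0x20000000, so the function
 * returns 0x10000000 + 0x01234567 = 0x11234567 as the alias for pa.
 */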
2486
2487void
2488mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2489    vm_offset_t va)
2490{
2491
2492	/* Raw physical memory dumps don't have a virtual address. */
2493	if (md->md_vaddr == ~0UL) {
2494		tlb1_idx--;
2495		tlb1[tlb1_idx].mas1 = 0;
2496		tlb1[tlb1_idx].mas2 = 0;
2497		tlb1[tlb1_idx].mas3 = 0;
2498		tlb1_write_entry(tlb1_idx);
2499		return;
2500	}
2501
2502	/* Minidumps are based on virtual memory addresses. */
2503	/* Nothing to do... */
2504}
2505
2506struct pmap_md *
2507mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2508{
2509	static struct pmap_md md;
2510	pte_t *pte;
2511	vm_offset_t va;
2512
2513	if (dumpsys_minidump) {
2514		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2515		if (prev == NULL) {
2516			/* 1st: kernel .data and .bss. */
2517			md.md_index = 1;
2518			md.md_vaddr = trunc_page((uintptr_t)_etext);
2519			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2520			return (&md);
2521		}
2522		switch (prev->md_index) {
2523		case 1:
2524			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2525			md.md_index = 2;
2526			md.md_vaddr = data_start;
2527			md.md_size = data_end - data_start;
2528			break;
2529		case 2:
2530			/* 3rd: kernel VM. */
2531			va = prev->md_vaddr + prev->md_size;
2532			/* Find start of next chunk (from va). */
2533			while (va < virtual_end) {
2534				/* Don't dump the buffer cache. */
2535				if (va >= kmi.buffer_sva &&
2536				    va < kmi.buffer_eva) {
2537					va = kmi.buffer_eva;
2538					continue;
2539				}
2540				pte = pte_find(mmu, kernel_pmap, va);
2541				if (pte != NULL && PTE_ISVALID(pte))
2542					break;
2543				va += PAGE_SIZE;
2544			}
2545			if (va < virtual_end) {
2546				md.md_vaddr = va;
2547				va += PAGE_SIZE;
2548				/* Find last page in chunk. */
2549				while (va < virtual_end) {
2550					/* Don't run into the buffer cache. */
2551					if (va == kmi.buffer_sva)
2552						break;
2553					pte = pte_find(mmu, kernel_pmap, va);
2554					if (pte == NULL || !PTE_ISVALID(pte))
2555						break;
2556					va += PAGE_SIZE;
2557				}
2558				md.md_size = va - md.md_vaddr;
2559				break;
2560			}
2561			md.md_index = 3;
2562			/* FALLTHROUGH */
2563		default:
2564			return (NULL);
2565		}
2566	} else { /* full dumps */
2567		mem_regions(&physmem_regions, &physmem_regions_sz,
2568		    &availmem_regions, &availmem_regions_sz);
2569
2570		if (prev == NULL) {
2571			/* First physical chunk. */
2572			md.md_paddr = physmem_regions[0].mr_start;
2573			md.md_size = physmem_regions[0].mr_size;
2574			md.md_vaddr = ~0UL;
2575			md.md_index = 1;
2576		} else if (md.md_index < physmem_regions_sz) {
2577			md.md_paddr = physmem_regions[md.md_index].mr_start;
2578			md.md_size = physmem_regions[md.md_index].mr_size;
2579			md.md_vaddr = ~0UL;
2580			md.md_index++;
2581		} else {
2582			/* There's no next physical chunk. */
2583			return (NULL);
2584		}
2585	}
2586
2587	return (&md);
2588}
2589
2590/*
2591 * Map a set of physical memory pages into the kernel virtual address space.
2592 * Return a pointer to where it is mapped. This routine is intended to be used
2593 * for mapping device memory, NOT real memory.
2594 */
2595static void *
2596mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2597{
2598	void *res;
2599	uintptr_t va;
2600	vm_size_t sz;
2601
2602	/*
2603	 * CCSR is premapped. Note that (pa + size - 1) is there to make sure
2604	 * we don't wrap around. Devices on the local bus typically extend all
2605	 * the way up to and including 0xffffffff. In that case (pa + size)
2606	 * would wrap around to 0, creating a false positive (i.e. we would
2607	 * think the range is within the CCSR) and no mapping would be created.
2608	 */
2609	if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) {
2610		va = CCSRBAR_VA + (pa - ccsrbar_pa);
2611		return ((void *)va);
2612	}
2613
2614	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2615	res = (void *)va;
2616	if (size < PAGE_SIZE)
2617	    size = PAGE_SIZE;
2618
2619	do {
2620		sz = 1 << (ilog2(size) & ~1);
2621		if (bootverbose)
2622			printf("Wiring VA=%x to PA=%x (size=%x), "
2623			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2624		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2625		size -= sz;
2626		pa += sz;
2627		va += sz;
2628	} while (size > 0);
2629
2630	return (res);
2631}
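/*
 * Sizing example for the loop above (values illustrative only): a 5MB
 * (0x500000) request gives ilog2(0x500000) = 22, unchanged by the & ~1
 * mask, so the first TLB1 entry wires a 4MB page; the remaining 1MB
 * gives ilog2(0x100000) = 20 and a second, 1MB entry.  The "& ~1"
 * keeps each entry size a power of 4, as the TSIZE encoding requires.
 */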
2632
2633/*
2634 * 'Unmap' a range mapped by mmu_booke_mapdev().
2635 */
2636static void
2637mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2638{
2639	vm_offset_t base, offset;
2640
2641	/*
2642	 * Unmap only if this is inside kernel virtual space.
2643	 */
2644	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2645		base = trunc_page(va);
2646		offset = va & PAGE_MASK;
2647		size = roundup(offset + size, PAGE_SIZE);
2648		kva_free(base, size);
2649	}
2650}
2651
2652/*
2653 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2654 * specified pmap. This eliminates the blast of soft faults on process startup
2655 * and immediately after an mmap.
2656 */
2657static void
2658mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2659    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2660{
2661
2662	VM_OBJECT_ASSERT_WLOCKED(object);
2663	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2664	    ("mmu_booke_object_init_pt: non-device object"));
2665}
2666
2667/*
2668 * Perform the pmap work for mincore.
2669 */
2670static int
2671mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2672    vm_paddr_t *locked_pa)
2673{
2674
2675	TODO;
2676	return (0);
2677}
2678
2679/**************************************************************************/
2680/* TID handling */
2681/**************************************************************************/
2682
2683/*
2684 * Allocate a TID. If necessary, steal one from someone else.
2685 * The new TID is flushed from the TLB before returning.
2686 */
2687static tlbtid_t
2688tid_alloc(pmap_t pmap)
2689{
2690	tlbtid_t tid;
2691	int thiscpu;
2692
2693	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2694
2695	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2696
2697	thiscpu = PCPU_GET(cpuid);
2698
2699	tid = PCPU_GET(tid_next);
2700	if (tid > TID_MAX)
2701		tid = TID_MIN;
2702	PCPU_SET(tid_next, tid + 1);
2703
2704	/* If we are stealing TID then clear the relevant pmap's field */
2705	/* If we are stealing a TID, clear the relevant pmap's field. */
2706
2707		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2708
2709		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2710
2711		/* Flush all entries from TLB0 matching this TID. */
2712		tid_flush(tid);
2713	}
2714
2715	tidbusy[thiscpu][tid] = pmap;
2716	pmap->pm_tid[thiscpu] = tid;
2717	__asm __volatile("msync; isync");
2718
2719	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2720	    PCPU_GET(tid_next));
2721
2722	return (tid);
2723}
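/*
 * Illustration of the round-robin scheme above (TID_MIN/TID_MAX values
 * hypothetical): with TID_MIN = 2 and TID_MAX = 255, once tid_next
 * passes 255 allocation restarts at 2 and begins stealing; the victim
 * pmap's pm_tid[cpu] is reset to TID_NONE, so it will call tid_alloc()
 * again from mmu_booke_activate() on its next activation.
 */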
2724
2725/**************************************************************************/
2726/* TLB0 handling */
2727/**************************************************************************/
2728
2729static void
2730tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2731    uint32_t mas7)
2732{
2733	int as;
2734	char desc[3];
2735	tlbtid_t tid;
2736	vm_size_t size;
2737	unsigned int tsize;
2738
2739	desc[2] = '\0';
2740	if (mas1 & MAS1_VALID)
2741		desc[0] = 'V';
2742	else
2743		desc[0] = ' ';
2744
2745	if (mas1 & MAS1_IPROT)
2746		desc[1] = 'P';
2747	else
2748		desc[1] = ' ';
2749
2750	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2751	tid = MAS1_GETTID(mas1);
2752
2753	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2754	size = 0;
2755	if (tsize)
2756		size = tsize2size(tsize);
2757
2758	debugf("%3d: (%s) [AS=%d] "
2759	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2760	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2761	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2762}
2763
2764/* Convert TLB0 va and way number to tlb0[] table index. */
2765static inline unsigned int
2766tlb0_tableidx(vm_offset_t va, unsigned int way)
2767{
2768	unsigned int idx;
2769
2770	idx = (way * TLB0_ENTRIES_PER_WAY);
2771	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2772	return (idx);
2773}
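/*
 * Worked example (TLB0 geometry assumed: 2 ways of 128 entries each,
 * as on e500v1 class cores): for va = 0x00305000 and way = 1, the EPN
 * set-index bits give (0x00305000 >> 12) & 0x7f = 5, so
 * idx = 1 * 128 + 5 = 133.
 */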
2774
2775/*
2776 * Invalidate TLB0 entry.
2777 */
2778static inline void
2779tlb0_flush_entry(vm_offset_t va)
2780{
2781
2782	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2783
2784	mtx_assert(&tlbivax_mutex, MA_OWNED);
2785
2786	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2787	__asm __volatile("isync; msync");
2788	__asm __volatile("tlbsync; msync");
2789
2790	CTR1(KTR_PMAP, "%s: e", __func__);
2791}
2792
2793/* Print out contents of the MAS registers for each TLB0 entry */
2794void
2795tlb0_print_tlbentries(void)
2796{
2797	uint32_t mas0, mas1, mas2, mas3, mas7;
2798	int entryidx, way, idx;
2799
2800	debugf("TLB0 entries:\n");
2801	for (way = 0; way < TLB0_WAYS; way++)
2802		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2803
2804			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2805			mtspr(SPR_MAS0, mas0);
2806			__asm __volatile("isync");
2807
2808			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2809			mtspr(SPR_MAS2, mas2);
2810
2811			__asm __volatile("isync; tlbre");
2812
2813			mas1 = mfspr(SPR_MAS1);
2814			mas2 = mfspr(SPR_MAS2);
2815			mas3 = mfspr(SPR_MAS3);
2816			mas7 = mfspr(SPR_MAS7);
2817
2818			idx = tlb0_tableidx(mas2, way);
2819			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2820		}
2821}
2822
2823/**************************************************************************/
2824/* TLB1 handling */
2825/**************************************************************************/
2826
2827/*
2828 * TLB1 mapping notes:
2829 *
2830 * TLB1[0]	CCSRBAR
2831 * TLB1[1]	Kernel text and data.
2832 * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
2833 *		windows, other devices mappings.
2834 */
2835
2836/*
2837 * Write the given entry to the TLB1 hardware.
2838 * Use a 32 bit pa; clear the 4 high-order RPN bits (mas7).
2839 */
2840static void
2841tlb1_write_entry(unsigned int idx)
2842{
2843	uint32_t mas0, mas7;
2844
2845	//debugf("tlb1_write_entry: s\n");
2846
2847	/* Clear high order RPN bits */
2848	mas7 = 0;
2849
2850	/* Select entry */
2851	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2852	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2853
2854	mtspr(SPR_MAS0, mas0);
2855	__asm __volatile("isync");
2856	mtspr(SPR_MAS1, tlb1[idx].mas1);
2857	__asm __volatile("isync");
2858	mtspr(SPR_MAS2, tlb1[idx].mas2);
2859	__asm __volatile("isync");
2860	mtspr(SPR_MAS3, tlb1[idx].mas3);
2861	__asm __volatile("isync");
2862	mtspr(SPR_MAS7, mas7);
2863	__asm __volatile("isync; tlbwe; isync; msync");
2864
2865	//debugf("tlb1_write_entry: e\n");
2866}
2867
2868/*
2869 * Return the largest uint value log such that 2^log <= num.
2870 */
2871static unsigned int
2872ilog2(unsigned int num)
2873{
2874	int lz;
2875
2876	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2877	return (31 - lz);
2878}
2879
2880/*
2881 * Convert TLB TSIZE value to mapped region size.
2882 */
2883static vm_size_t
2884tsize2size(unsigned int tsize)
2885{
2886
2887	/*
2888	 * size = 4^tsize KB
2889	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2890	 */
2891
2892	return ((1 << (2 * tsize)) * 1024);
2893}
2894
2895/*
2896 * Convert region size (must be power of 4) to TLB TSIZE value.
2897 */
2898static unsigned int
2899size2tsize(vm_size_t size)
2900{
2901
2902	return (ilog2(size) / 2 - 5);
2903}
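/*
 * Worked check of the two conversions above: TSIZE = 7 maps
 * 4^7 KB = 2^(2 * 7 + 10) = 16MB, and size2tsize(16MB) =
 * ilog2(2^24) / 2 - 5 = 12 - 5 = 7, so the two functions are inverses
 * for power-of-4 sizes.
 */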
2904
2905/*
2906 * Register permanent kernel mapping in TLB1.
2907 *
2908 * Entries are created starting from index 0 (current free entry is
2909 * kept in tlb1_idx) and are not supposed to be invalidated.
2910 */
2911static int
2912tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2913    uint32_t flags)
2914{
2915	uint32_t ts, tid;
2916	int tsize;
2917
2918	if (tlb1_idx >= TLB1_ENTRIES) {
2919		printf("tlb1_set_entry: TLB1 full!\n");
2920		return (-1);
2921	}
2922
2923	/* Convert size to TSIZE */
2924	tsize = size2tsize(size);
2925
2926	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2927	/* XXX TS is hard coded to 0 for now as we only use a single address space */
2928	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2929
2930	/* XXX LOCK tlb1[] */
2931
2932	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2933	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2934	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2935
2936	/* Set supervisor RWX permission bits */
2937	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2938
2939	tlb1_write_entry(tlb1_idx++);
2940
2941	/* XXX UNLOCK tlb1[] */
2942
2943	/*
2944	 * XXX in general TLB1 updates should be propagated between CPUs,
2945	 * since the current design assumes the same TLB1 set-up on all
2946	 * cores.
2947	 */
2948	return (0);
2949}
2950
2951/*
2952 * Map a contiguous RAM region into TLB1, using at most
2953 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2954 *
2955 * If necessary, round up the last entry size and return the total size
2956 * used by all allocated entries.
2957 */
2958vm_size_t
2959tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
2960{
2961	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
2962	vm_size_t mapped, pgsz, base, mask;
2963	int idx, nents;
2964
2965	/* Round up to the next 1M */
2966	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);
2967
2968	mapped = 0;
2969	idx = 0;
2970	base = va;
2971	pgsz = 64*1024*1024;
2972	while (mapped < size) {
2973		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
2974			while (pgsz > (size - mapped))
2975				pgsz >>= 2;
2976			pgs[idx++] = pgsz;
2977			mapped += pgsz;
2978		}
2979
2980		/* We under-map. Correct for this. */
2981		if (mapped < size) {
2982			while (pgs[idx - 1] == pgsz) {
2983				idx--;
2984				mapped -= pgsz;
2985			}
2986			/* XXX We may increase beyond our starting point. */
2987			pgsz <<= 2;
2988			pgs[idx++] = pgsz;
2989			mapped += pgsz;
2990		}
2991	}
2992
2993	nents = idx;
2994	mask = pgs[0] - 1;
2995	/* Align address to the boundary */
2996	if (va & mask) {
2997		va = (va + mask) & ~mask;
2998		pa = (pa + mask) & ~mask;
2999	}
3000
3001	for (idx = 0; idx < nents; idx++) {
3002		pgsz = pgs[idx];
3003		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
3004		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
3005		pa += pgsz;
3006		va += pgsz;
3007	}
3008
3009	mapped = (va - base);
3010	debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
3011	    mapped, mapped - size);
3012	return (mapped);
3013}
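/*
 * Example of the page-size selection above (sizes illustrative only):
 * an 80MB region is covered by pgs[] = { 64MB, 16MB }: the initial
 * 64MB pgsz fits, and one pgsz >>= 2 step covers the 16MB remainder.
 * Only when the region cannot be covered within
 * KERNEL_REGION_MAX_TLB_ENTRIES entries does the correction path grow
 * pgsz again and accept some over-mapping.
 */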
3014
3015/*
3016 * TLB1 initialization routine, to be called after the very first
3017 * assembler level setup done in locore.S.
3018 */
3019void
3020tlb1_init(vm_offset_t ccsrbar)
3021{
3022	uint32_t mas0, mas1, mas3;
3023	uint32_t tsz;
3024	u_int i;
3025
3026	ccsrbar_pa = ccsrbar;
3027
3028	if (bootinfo != NULL && bootinfo[0] != 1) {
3029		tlb1_idx = *((uint16_t *)(bootinfo + 8));
3030	} else
3031		tlb1_idx = 1;
3032
3033	/* The first entry/entries are used to map the kernel. */
3034	for (i = 0; i < tlb1_idx; i++) {
3035		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3036		mtspr(SPR_MAS0, mas0);
3037		__asm __volatile("isync; tlbre");
3038
3039		mas1 = mfspr(SPR_MAS1);
3040		if ((mas1 & MAS1_VALID) == 0)
3041			continue;
3042
3043		mas3 = mfspr(SPR_MAS3);
3044
3045		tlb1[i].mas1 = mas1;
3046		tlb1[i].mas2 = mfspr(SPR_MAS2);
3047		tlb1[i].mas3 = mas3;
3048
3049		if (i == 0)
3050			kernload = mas3 & MAS3_RPN;
3051
3052		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3053		kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
3054	}
3055
3056	/* Map in CCSRBAR. */
3057	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
3058
3059#ifdef SMP
3060	bp_ntlb1s = tlb1_idx;
3061#endif
3062
3063	/* Purge the remaining entries */
3064	for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
3065		tlb1_write_entry(i);
3066
3067	/* Setup TLB miss defaults */
3068	set_mas4_defaults();
3069}
3070
3071/*
3072 * Setup MAS4 defaults.
3073 * These values are loaded to MAS0-2 on a TLB miss.
3074 */
3075static void
3076set_mas4_defaults(void)
3077{
3078	uint32_t mas4;
3079
3080	/* Defaults: TLB0, PID0, TSIZED=4K */
3081	mas4 = MAS4_TLBSELD0;
3082	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3083#ifdef SMP
3084	mas4 |= MAS4_MD;
3085#endif
3086	mtspr(SPR_MAS4, mas4);
3087	__asm __volatile("isync");
3088}
3089
3090/*
3091 * Print out contents of the MAS registers for each TLB1 entry
3092 */
3093void
3094tlb1_print_tlbentries(void)
3095{
3096	uint32_t mas0, mas1, mas2, mas3, mas7;
3097	int i;
3098
3099	debugf("TLB1 entries:\n");
3100	for (i = 0; i < TLB1_ENTRIES; i++) {
3101
3102		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3103		mtspr(SPR_MAS0, mas0);
3104
3105		__asm __volatile("isync; tlbre");
3106
3107		mas1 = mfspr(SPR_MAS1);
3108		mas2 = mfspr(SPR_MAS2);
3109		mas3 = mfspr(SPR_MAS3);
3110		mas7 = mfspr(SPR_MAS7);
3111
3112		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3113	}
3114}
3115
3116/*
3117 * Print out contents of the in-ram tlb1 table.
3118 */
3119void
3120tlb1_print_entries(void)
3121{
3122	int i;
3123
3124	debugf("tlb1[] table entries:\n");
3125	for (i = 0; i < TLB1_ENTRIES; i++)
3126		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3127}
3128
3129/*
3130 * Return 0 if the physical IO range is encompassed by one of the
3131 * TLB1 entries, otherwise return the related error code.
3132 */
3133static int
3134tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3135{
3136	uint32_t prot;
3137	vm_paddr_t pa_start;
3138	vm_paddr_t pa_end;
3139	unsigned int entry_tsize;
3140	vm_size_t entry_size;
3141
3142	*va = (vm_offset_t)NULL;
3143
3144	/* Skip invalid entries */
3145	if (!(tlb1[i].mas1 & MAS1_VALID))
3146		return (EINVAL);
3147
3148	/*
3149	 * The entry must be cache-inhibited, guarded, and r/w
3150	 * so it can function as an i/o page
3151	 */
3152	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3153	if (prot != (MAS2_I | MAS2_G))
3154		return (EPERM);
3155
3156	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3157	if (prot != (MAS3_SR | MAS3_SW))
3158		return (EPERM);
3159
3160	/* The address should be within the entry range. */
3161	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3162	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3163
3164	entry_size = tsize2size(entry_tsize);
3165	pa_start = tlb1[i].mas3 & MAS3_RPN;
3166	pa_end = pa_start + entry_size - 1;
3167
3168	if ((pa < pa_start) || ((pa + size) > pa_end))
3169		return (ERANGE);
3170
3171	/* Return virtual address of this mapping. */
3172	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3173	return (0);
3174}
3175