/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
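
/*
 * Illustrative sketch: these macros imply a two-level page table walk
 * on 32-bit kernels and a three-level walk on n64, roughly
 *
 *	seg = pmap->pm_segtab[pmap_seg_index(va)];
 *	pde = ((pd_entry_t *)seg)[pmap_pde_index(va)];	(n64 only)
 *	pte = ((pt_entry_t *)pde)[pmap_pte_index(va)];
 *
 * which is the path implemented by pmap_segmap(), pmap_pdpe_to_pde()
 * and pmap_pde_to_pte() below.
 */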

#ifdef __mips_n64
#define	NUPDE			(NPDEPG * NPDEPG)
#define	NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define	NUPDE			(NPDEPG)
#define	NUSERPGTBLS		(NUPDE)
#endif

#define	is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count;

static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static vm_page_t pmap_alloc_direct_page(unsigned int index, int req);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);

static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_range_action(void *arg);
static void pmap_update_page_action(void *arg);

#ifndef __mips_n64
/*
 * This structure is for high memory (memory above 512MB in a 32-bit kernel)
 * support.  The highmem area does not have a KSEG0 mapping, and we need a
 * mechanism to do temporary per-CPU mappings for pmap_zero_page,
 * pmap_copy_page etc.
 *
 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages.  To
 * access a highmem physical address on a CPU, we map the physical address to
 * the reserved virtual address for the CPU in the kernel pagetable.  This is
 * done with interrupts disabled (although a spinlock and sched_pin would be
 * sufficient).
 */
struct local_sysmaps {
	vm_offset_t	base;
	uint32_t	saved_intr;
	uint16_t	valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static __inline void
pmap_alloc_lmem_map(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		sysmap_lmem[i].base = virtual_avail;
		virtual_avail += PAGE_SIZE * 2;
		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
	}
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va = sysm->base;
	npte = TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va);
	*pte = npte;
	sysm->valid1 = 1;
	return (va);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va1, va2;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va1 = sysm->base;
	va2 = sysm->base + PAGE_SIZE;
	npte = TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va1);
	*pte = npte;
	npte = TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va2);
	*pte = npte;
	sysm->valid1 = 1;
	sysm->valid2 = 1;
	return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte;
	int cpu;

	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	pte = pmap_pte(kernel_pmap, sysm->base);
	*pte = PTE_G;
	tlb_invalidate_address(kernel_pmap, sysm->base);
	sysm->valid1 = 0;
	if (sysm->valid2) {
		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
		*pte = PTE_G;
		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
		sysm->valid2 = 0;
	}
	intr_restore(sysm->saved_intr);
}
#else  /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

	return (0);
}

static __inline void
pmap_lmem_unmap(void)
{
}
#endif /* !__mips_n64 */
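
/*
 * Sketch of the intended lmem usage pattern (hypothetical caller; the
 * real consumers are per-page operations such as pmap_zero_page() and
 * pmap_copy_page() when a page is not direct-mappable):
 *
 *	va = pmap_lmem_map1(VM_PAGE_TO_PHYS(m));
 *	bzero((caddr_t)va, PAGE_SIZE);
 *	pmap_lmem_unmap();
 *
 * Interrupts remain disabled between map and unmap on 32-bit kernels,
 * so the window between the two calls must be kept short.
 */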

/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = (pd_entry_t *)*pdpe;
	return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pdpe;

	pdpe = pmap_segmap(pmap, va);
	if (*pdpe == NULL)
		return (NULL);

	return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

	return (pdpe);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{

	return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)*pde;
	return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == NULL)
		return (NULL);

	return (pmap_pde_to_pte(pde, va));
}
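
/*
 * For illustration, a VA-to-PA translation built from the helpers
 * above looks roughly like the body of pmap_extract() below:
 *
 *	pte = pmap_pte(pmap, va);
 *	if (pte != NULL)
 *		pa = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
 */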

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_paddr_t bank_size, pa;
	vm_offset_t va;

	size = round_page(size);
	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;

		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;
	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
		panic("Out of memory below 512Meg?");
	va = MIPS_PHYS_TO_DIRECT(pa);
	bzero((caddr_t)va, size);
	return (va);
}

/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
static void
pmap_create_kernel_pagetable(void)
{
	int i, j;
	vm_offset_t ptaddr;
	pt_entry_t *pte;
#ifdef __mips_n64
	pd_entry_t *pde;
	vm_offset_t pdaddr;
	int npt, npde;
#endif

	/*
	 * Allocate segment table for the kernel
	 */
	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

	/*
	 * Allocate second level page tables for the kernel
	 */
#ifdef __mips_n64
	npde = howmany(NKPT, NPDEPG);
	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
#endif
	nkpt = NKPT;
	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);

	/*
	 * The R[4-7]?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry.  Thus invalid
	 * entries must have the Global bit set so that when Entry LO and
	 * Entry HI G bits are ANDed together they will produce a global bit
	 * to store in the TLB.
	 */
	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
		*pte = PTE_G;

#ifdef __mips_n64
	for (i = 0, npt = nkpt; npt > 0; i++) {
		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
		pde = (pd_entry_t *)kernel_segmap[i];

		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
	}
#else
	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
#endif

	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_segtab = kernel_segmap;
	CPU_FILL(&kernel_pmap->pm_active);
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
	kernel_pmap->pm_asid[0].gen = 0;
	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
}
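
/*
 * A note on the result (illustrative summary): the initial kernel page
 * table now consists of one stolen page of segment entries
 * (kernel_segmap) and NKPT stolen pages of PTEs (plus, on n64,
 * howmany(NKPT, NPDEPG) intermediate directory pages), with every PTE
 * initialized to PTE_G so that invalid entries satisfy the paired
 * Global bit requirement described above.
 */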

void
pmap_bootstrap(void)
{
	int i;
	int need_local_mappings = 0;

	/* Sort phys_avail[] into ascending order. */
again:
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/*
		 * Keep the memory aligned on page boundary.
		 */
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

		if (i < 2)
			continue;
		if (phys_avail[i - 2] > phys_avail[i]) {
			vm_paddr_t ptemp[2];

			ptemp[0] = phys_avail[i + 0];
			ptemp[1] = phys_avail[i + 1];

			phys_avail[i + 0] = phys_avail[i - 2];
			phys_avail[i + 1] = phys_avail[i - 1];

			phys_avail[i - 2] = ptemp[0];
			phys_avail[i - 1] = ptemp[1];
			goto again;
		}
	}

	/*
	 * In a 32-bit kernel, we may have memory which cannot be mapped
	 * directly.  This memory will need temporary mapping before it
	 * can be accessed.
	 */
	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
		need_local_mappings = 1;

	/*
	 * Copy the phys_avail[] array before we start stealing memory from it.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		physmem_desc[i] = phys_avail[i];
		physmem_desc[i + 1] = phys_avail[i + 1];
	}

	Maxmem = atop(phys_avail[i - 1]);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			vm_paddr_t size;

			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t) phys_avail[i],
			    (uintmax_t) phys_avail[i + 1] - 1,
			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
		}
		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
	}
	/*
	 * Steal the message buffer from the beginning of memory.
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Steal thread0 kstack.
	 */
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
	/*
	 * Steal some virtual address space to map the pcpu area.
	 */
	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
	pcpup = (struct pcpu *)virtual_avail;
	virtual_avail += PAGE_SIZE * 2;

	/*
	 * Initialize the wired TLB entry mapping the pcpu region for
	 * the BSP at 'pcpup'. Up until this point we were operating
	 * with the 'pcpup' for the BSP pointing to a virtual address
	 * in KSEG0 so there was no need for a TLB mapping.
	 */
	mips_pcpu_tlb_init(PCPU_ADDR(0));

	if (bootverbose)
		printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

	if (need_local_mappings)
		pmap_alloc_lmem_map();
	pmap_create_kernel_pagetable();
	pmap_max_asid = VMNUM_PIDS;
	mips_wr_entryhi(0);
	mips_wr_pagemask(0);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_flags = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
}

/***************************************************
 * Low level helper routines.....
 ***************************************************/

#ifdef	SMP
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid, cpu, self;
	cpuset_t active_cpus;

	sched_pin();
	if (is_kernel_pmap(pmap)) {
		smp_rendezvous(NULL, fn, NULL, arg);
		goto out;
	}
	/* Force ASID update on inactive CPUs */
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &pmap->pm_active))
			pmap->pm_asid[cpu].gen = 0;
	}
	cpuid = PCPU_GET(cpuid);
	/*
	 * XXX: barrier/locking for active?
	 *
	 * Take a snapshot of active here, any further changes are ignored.
	 * tlb update/invalidate should be harmless on inactive CPUs
	 */
	active_cpus = pmap->pm_active;
	self = CPU_ISSET(cpuid, &active_cpus);
	CPU_CLR(cpuid, &active_cpus);
	/* Optimize for the case where this cpu is the only active one */
	if (CPU_EMPTY(&active_cpus)) {
		if (self)
			fn(arg);
	} else {
		if (self)
			CPU_SET(cpuid, &active_cpus);
		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
	}
out:
	sched_unpin();
}
#else /* !SMP */
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid;

	if (is_kernel_pmap(pmap)) {
		fn(arg);
		return;
	}
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &pmap->pm_active))
		pmap->pm_asid[cpuid].gen = 0;
	else
		fn(arg);
}
#endif /* SMP */

static void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_call_on_active_cpus(pmap,
	    (void (*)(void *))tlb_invalidate_all_user, pmap);
}

struct pmap_invalidate_page_arg {
	pmap_t pmap;
	vm_offset_t va;
};

static void
pmap_invalidate_page_action(void *arg)
{
	struct pmap_invalidate_page_arg *p = arg;

	tlb_invalidate_address(p->pmap, p->va);
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	struct pmap_invalidate_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
}

struct pmap_invalidate_range_arg {
	pmap_t pmap;
	vm_offset_t sva;
	vm_offset_t eva;
};

static void
pmap_invalidate_range_action(void *arg)
{
	struct pmap_invalidate_range_arg *p = arg;

	tlb_invalidate_range(p->pmap, p->sva, p->eva);
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_invalidate_range_arg arg;

	arg.pmap = pmap;
	arg.sva = sva;
	arg.eva = eva;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
}

struct pmap_update_page_arg {
	pmap_t pmap;
	vm_offset_t va;
	pt_entry_t pte;
};

static void
pmap_update_page_action(void *arg)
{
	struct pmap_update_page_arg *p = arg;

	tlb_update(p->pmap, p->va, p->pte);
}

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
	struct pmap_update_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	arg.pte = pte;
	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	vm_paddr_t retval = 0;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte) {
		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
	}
	PMAP_UNLOCK(pmap);
	return (retval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t pte, *ptep;
	vm_paddr_t pa, pte_pa;
	vm_page_t m;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, va);
	if (ptep != NULL) {
		pte = *ptep;
		if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) ||
		    (prot & VM_PROT_WRITE) == 0)) {
			pte_pa = TLBLO_PTE_TO_PA(pte);
			if (vm_page_pa_tryrelock(pmap, pte_pa, &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(pte_pa);
			vm_page_hold(m);
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
{
	pt_entry_t *pte;
	pt_entry_t opte, npte;

#ifdef PMAP_DEBUG
	printf("pmap_kenter_attr:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif

	pte = pmap_pte(kernel_pmap, va);
	opte = *pte;
	npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
	*pte = npte;
	if (pte_test(&opte, PTE_V) && opte != npte)
		pmap_update_page(kernel_pmap, va, npte);
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	KASSERT(is_cacheable_mem(pa),
		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));

	pmap_kenter_attr(va, pa, PTE_C_CACHE);
}

/*
 * remove a page from the kernel pagetables
 */
 /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	pte = pmap_pte(kernel_pmap, va);
	*pte = PTE_G;
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	if (MIPS_DIRECT_MAPPABLE(end - 1))
		return (MIPS_PHYS_TO_DIRECT(start));

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}
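
/*
 * Illustrative (hypothetical) caller of pmap_map(), showing the
 * contract described above:
 *
 *	vm_offset_t vaddr = virtual_avail;
 *	sva = pmap_map(&vaddr, start, end, VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = vaddr;
 *
 * When [start, end) is direct-mappable, sva is a KSEG0/XKPHYS address
 * and vaddr is left untouched; otherwise the range is mapped page by
 * page with pmap_kenter() and vaddr is advanced past the mapping.
 */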

/*
 * Add a list of wired pages to the kva.  This routine is only used
 * for temporary kernel mappings that do not need to have page
 * modification or references recorded.  Note that old mappings are
 * simply written over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;
	vm_offset_t origva = va;

	for (i = 0; i < count; i++) {
		pmap_flush_pvcache(m[i]);
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
		va += PAGE_SIZE;
	}

	mips_dcache_wbinv_range_index(origva, PAGE_SIZE * count);
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	pt_entry_t *pte;
	vm_offset_t origva;

	if (count < 1)
		return;
	mips_dcache_wbinv_range_index(va, PAGE_SIZE * count);
	origva = va;
	do {
		pte = pmap_pte(kernel_pmap, va);
		*pte = PTE_G;
		va += PAGE_SIZE;
	} while (--count > 0);
	pmap_invalidate_range(kernel_pmap, origva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * Decrements a page table page's wire count, which is used to record the
 * number of valid page table entries within the page.  If the wire count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static PMAP_INLINE boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	--m->wire_count;
	if (m->wire_count == 0) {
		_pmap_unwire_ptp(pmap, va, m);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pd_entry_t *pde;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
#ifdef __mips_n64
	if (m->pindex < NUPDE)
		pde = pmap_pde(pmap, va);
	else
		pde = pmap_segmap(pmap, va);
#else
	pde = pmap_pde(pmap, va);
#endif
	*pde = 0;
	pmap->pm_stats.resident_count--;

#ifdef __mips_n64
	if (m->pindex < NUPDE) {
		pd_entry_t *pdp;
		vm_page_t pdpg;

		/*
		 * Recursively decrement next level pagetable refcount
		 */
		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
		pmap_unwire_ptp(pmap, va, pdpg);
	}
#endif

	/*
	 * If the page is finally unwired, simply free it.
	 */
	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
	return (pmap_unwire_ptp(pmap, va, mpte));
}

void
pmap_pinit0(pmap_t pmap)
{
	int i;

	PMAP_LOCK_INIT(pmap);
	pmap->pm_segtab = kernel_segmap;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

void
pmap_grow_direct_page_cache(void)
{

#ifdef __mips_n64
	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
#else
	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
#endif
}

static vm_page_t
pmap_alloc_direct_page(unsigned int index, int req)
{
	vm_page_t m;

	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);

	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	m->pindex = index;
	return (m);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;
	int i;

	/*
	 * allocate the page directory page
	 */
	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
		pmap_grow_direct_page_cache();

	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
	pmap->pm_segtab = (pd_entry_t *)ptdva;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
{
	vm_offset_t pageva;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Find or fabricate a new pagetable page
	 */
	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
		if (flags & M_WAITOK) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			pmap_grow_direct_page_cache();
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page
		 * table page may have been allocated.
		 */
		return (NULL);
	}

	/*
	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));

#ifdef __mips_n64
	if (ptepindex >= NUPDE) {
		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
	} else {
		pd_entry_t *pdep, *pde;
		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
		int pdeindex = ptepindex & (NPDEPG - 1);
		vm_page_t pg;

		pdep = &pmap->pm_segtab[segindex];
		if (*pdep == NULL) {
			/* recurse for allocating page dir */
			if (_pmap_allocpte(pmap, NUPDE + segindex,
			    flags) == NULL) {
				/* alloc failed, release current */
				--m->wire_count;
				atomic_subtract_int(&cnt.v_wire_count, 1);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
			pg->wire_count++;
		}
		/* Next level entry */
		pde = (pd_entry_t *)*pdep;
		pde[pdeindex] = (pd_entry_t)pageva;
	}
#else
	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
#endif
	pmap->pm_stats.resident_count++;
	return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
	unsigned ptepindex;
	pd_entry_t *pde;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = pmap_pde_pindex(va);
retry:
	/*
	 * Get the page directory entry
	 */
	pde = pmap_pde(pmap, va);

	/*
	 * If the page table page is mapped, we just increment the hold
	 * count, and activate it.
	 */
	if (pde != NULL && *pde != NULL) {
		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & M_WAITOK))
			goto retry;
	}
	return (m);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	ptdva = (vm_offset_t)pmap->pm_segtab;
	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));

	ptdpg->wire_count--;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free_zero(ptdpg);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	vm_page_t nkpg;
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	int i;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	addr = roundup2(addr, NBSEG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
		if (*pdpe == 0) {
			/* new intermediate page table entry */
			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
			if (nkpg == NULL)
				panic("pmap_growkernel: no memory to grow kernel");
			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
			continue; /* try again */
		}
#endif
		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
		if (*pde != 0) {
			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
		nkpt++;
		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));

		/*
		 * The R[4-7]?00 stores only one copy of the Global bit in
		 * the translation lookaside buffer for each 2 page entry.
		 * Thus invalid entries must have the Global bit set so that
		 * when Entry LO and Entry HI G bits are ANDed together they
		 * will produce a global bit to store in the TLB.
		 */
		pte = (pt_entry_t *)*pde;
		for (i = 0; i < NPTEPG; i++)
			pte[i] = PTE_G;

		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
#ifdef __mips_n64
CTASSERT(_NPCM == 3);
CTASSERT(_NPCPV == 168);
#else
CTASSERT(_NPCM == 11);
CTASSERT(_NPCPV == 336);
#endif

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#ifdef __mips_n64
#define	PC_FREE0_1	0xfffffffffffffffful
#define	PC_FREE2	0x000000fffffffffful
#else
#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
#endif

static const u_long pc_freemask[_NPCM] = {
#ifdef __mips_n64
	PC_FREE0_1, PC_FREE0_1, PC_FREE2
#else
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE10
#endif
};
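
/*
 * Layout note (illustrative): a pv_chunk is one page holding a small
 * header plus _NPCPV pv entries, with pc_map[] tracking which entries
 * are free.  On 32-bit kernels, 336 entries occupy ten full 32-bit
 * u_long words plus the low 16 bits of an eleventh, hence PC_FREE10
 * above is 0x0000ffff; on n64, 168 entries span two full 64-bit words
 * plus the low 40 bits of a third, hence PC_FREE2 is 0x000000ffffffffff.
 */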

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
    "Current number of pv entries");

#ifdef PV_STATS
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
    "Number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
    "Number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
    "Number of times tried to get a chunk page but failed.");

static long pv_entry_frees, pv_entry_allocs;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
    "Number of pv entries freed");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
    "Number of pv entries allocated");
1364239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1365239236Salc    "Current number of spare pv entries");
1366239236Salc#endif
1367239236Salc
1368178172Simp/*
1369239236Salc * We are in a serious low memory condition.  Resort to
1370239236Salc * drastic measures to free some pages so we can allocate
1371239236Salc * another pv entry chunk.
1372239236Salc */
1373239236Salcstatic vm_page_t
1374239236Salcpmap_pv_reclaim(pmap_t locked_pmap)
1375239236Salc{
1376239236Salc	struct pch newtail;
1377239236Salc	struct pv_chunk *pc;
1378239236Salc	pd_entry_t *pde;
1379239236Salc	pmap_t pmap;
1380239236Salc	pt_entry_t *pte, oldpte;
1381239236Salc	pv_entry_t pv;
1382239236Salc	vm_offset_t va;
1383239236Salc	vm_page_t m, m_pc;
1384239236Salc	u_long inuse;
1385239236Salc	int bit, field, freed, idx;
1386239236Salc
1387239236Salc	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1388239236Salc	pmap = NULL;
1389239236Salc	m_pc = NULL;
1390239236Salc	TAILQ_INIT(&newtail);
1391239236Salc	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
1392239236Salc		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1393239236Salc		if (pmap != pc->pc_pmap) {
1394239236Salc			if (pmap != NULL) {
1395239236Salc				pmap_invalidate_all(pmap);
1396239236Salc				if (pmap != locked_pmap)
1397239236Salc					PMAP_UNLOCK(pmap);
1398239236Salc			}
1399239236Salc			pmap = pc->pc_pmap;
1400239236Salc			/* Avoid deadlock and lock recursion. */
1401239236Salc			if (pmap > locked_pmap)
1402239236Salc				PMAP_LOCK(pmap);
1403239236Salc			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
1404239236Salc				pmap = NULL;
1405239236Salc				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1406239236Salc				continue;
1407239236Salc			}
1408239236Salc		}
1409239236Salc
1410239236Salc		/*
1411239236Salc		 * Destroy every non-wired, 4 KB page mapping in the chunk.
1412239236Salc		 */
1413239236Salc		freed = 0;
		for (field = 0; field < _NPCM; field++) {
			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
			    inuse != 0; inuse &= ~(1UL << bit)) {
				bit = ffsl(inuse) - 1;
				idx = field * sizeof(inuse) * NBBY + bit;
				pv = &pc->pc_pventry[idx];
				va = pv->pv_va;
				pde = pmap_pde(pmap, va);
				KASSERT(pde != NULL && *pde != 0,
				    ("pmap_pv_reclaim: pde"));
				pte = pmap_pde_to_pte(pde, va);
				oldpte = *pte;
				if (pte_test(&oldpte, PTE_W))
					continue;
				if (is_kernel_pmap(pmap))
					*pte = PTE_G;
				else
					*pte = 0;
				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
				if (pte_test(&oldpte, PTE_D))
					vm_page_dirty(m);
				if (m->md.pv_flags & PV_TABLE_REF)
					vm_page_aflag_set(m, PGA_REFERENCED);
				m->md.pv_flags &= ~PV_TABLE_REF;
				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
				if (TAILQ_EMPTY(&m->md.pv_list))
					vm_page_aflag_clear(m, PGA_WRITEABLE);
				pc->pc_map[field] |= 1UL << bit;
				pmap_unuse_pt(pmap, va, *pde);
				freed++;
			}
		}
		if (freed == 0) {
			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
			continue;
		}
		/* Every freed mapping is for a 4 KB page. */
		pmap->pm_stats.resident_count -= freed;
		PV_STAT(pv_entry_frees += freed);
		PV_STAT(pv_entry_spare += freed);
		pv_entry_count -= freed;
		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
		for (field = 0; field < _NPCM; field++)
			if (pc->pc_map[field] != pc_freemask[field]) {
				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
				    pc_list);
				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);

				/*
				 * One freed pv entry in locked_pmap is
				 * sufficient.
				 */
				if (pmap == locked_pmap)
					goto out;
				break;
			}
		if (field == _NPCM) {
			PV_STAT(pv_entry_spare -= _NPCPV);
			PV_STAT(pc_chunk_count--);
			PV_STAT(pc_chunk_frees++);
			/* Entire chunk is free; return it. */
			m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(
			    (vm_offset_t)pc));
			break;
		}
	}
out:
	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
	if (pmap != NULL) {
		pmap_invalidate_all(pmap);
		if (pmap != locked_pmap)
			PMAP_UNLOCK(pmap);
	}
	return (m_pc);
}

/*
 * Free a pv_entry back to its pv chunk, releasing the chunk's page
 * if the chunk becomes entirely free.
 */
static void
free_pv_entry(pmap_t pmap, pv_entry_t pv)
{
	struct pv_chunk *pc;
	int bit, field, idx;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	PV_STAT(pv_entry_frees++);
	PV_STAT(pv_entry_spare++);
	pv_entry_count--;
	pc = pv_to_chunk(pv);
	idx = pv - &pc->pc_pventry[0];
	field = idx / (sizeof(u_long) * NBBY);
	bit = idx % (sizeof(u_long) * NBBY);
	pc->pc_map[field] |= 1ul << bit;
	for (idx = 0; idx < _NPCM; idx++)
		if (pc->pc_map[idx] != pc_freemask[idx]) {
			/*
			 * 98% of the time, pc is already at the head of the
			 * list.  If it isn't already, move it to the head.
			 */
			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
			    pc)) {
				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
				    pc_list);
			}
			return;
		}
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	free_pv_chunk(pc);
}

static void
free_pv_chunk(struct pv_chunk *pc)
{
	vm_page_t m;

	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
	PV_STAT(pv_entry_spare -= _NPCPV);
	PV_STAT(pc_chunk_count--);
	PV_STAT(pc_chunk_frees++);
	/* entire chunk is free, return it */
	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
	vm_page_unwire(m, 0);
	vm_page_free(m);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 */
static pv_entry_t
get_pv_entry(pmap_t pmap, boolean_t try)
{
	struct pv_chunk *pc;
	pv_entry_t pv;
	vm_page_t m;
	int bit, field, idx;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	PV_STAT(pv_entry_allocs++);
	pv_entry_count++;
retry:
	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
	if (pc != NULL) {
		for (field = 0; field < _NPCM; field++) {
			if (pc->pc_map[field]) {
				bit = ffsl(pc->pc_map[field]) - 1;
				break;
			}
		}
		if (field < _NPCM) {
			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
			pv = &pc->pc_pventry[idx];
			pc->pc_map[field] &= ~(1ul << bit);
			/* If this was the last free entry, move the chunk to the tail. */
			for (field = 0; field < _NPCM; field++)
				if (pc->pc_map[field] != 0) {
					PV_STAT(pv_entry_spare--);
					return (pv);	/* not full, return */
				}
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
			PV_STAT(pv_entry_spare--);
			return (pv);
		}
	}
	/* No free items, allocate another chunk */
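	/*
	 * Pv chunks are addressed through the direct map (see
	 * MIPS_PHYS_TO_DIRECT() below), so the backing page must come
	 * from direct-mappable memory.
	 */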
	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
	    VM_ALLOC_WIRED);
	if (m == NULL) {
		if (try) {
			pv_entry_count--;
			PV_STAT(pc_chunk_tryfail++);
			return (NULL);
		}
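		/*
		 * Reclaim pv entries.  If a whole chunk page is
		 * returned, it is reused for the new chunk below;
		 * otherwise an entry may still have been freed in this
		 * pmap, so retry the allocation from the chunk list.
		 */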
		m = pmap_pv_reclaim(pmap);
		if (m == NULL)
			goto retry;
	}
	PV_STAT(pc_chunk_count++);
	PV_STAT(pc_chunk_allocs++);
	pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
	pc->pc_pmap = pmap;
	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
	for (field = 1; field < _NPCM; field++)
		pc->pc_map[field] = pc_freemask[field];
	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
	pv = &pc->pc_pventry[0];
	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
	PV_STAT(pv_entry_spare += _NPCPV - 1);
	return (pv);
}

static pv_entry_t
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
			break;
		}
	}
	return (pv);
}

static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_pvh_remove(pvh, pmap, va);
	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
	     (u_long)VM_PAGE_TO_PHYS(__containerof(pvh, struct vm_page, md)),
	     (u_long)va));
	free_pv_entry(pmap, pv);
}

static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	pmap_pvh_free(&m->md, pmap, va);
	if (TAILQ_EMPTY(&m->md.pv_list))
		vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
    vm_page_t m)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		return (TRUE);
	} else
		return (FALSE);
}

/*
 * pmap_remove_pte: unmap a single page within a process, tearing down
 * its pv entry and updating the pmap and page statistics.
 */
static int
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde)
{
	pt_entry_t oldpte;
	vm_page_t m;
	vm_paddr_t pa;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Write back all cache lines from the page being unmapped.
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	oldpte = *ptq;
	if (is_kernel_pmap(pmap))
		*ptq = PTE_G;
	else
		*ptq = 0;

	if (pte_test(&oldpte, PTE_W))
		pmap->pm_stats.wired_count -= 1;

	pmap->pm_stats.resident_count -= 1;

	if (pte_test(&oldpte, PTE_MANAGED)) {
		pa = TLBLO_PTE_TO_PA(oldpte);
		m = PHYS_TO_VM_PAGE(pa);
		if (pte_test(&oldpte, PTE_D)) {
			KASSERT(!pte_test(&oldpte, PTE_RO),
			    ("%s: modified page not writable: va: %p, pte: %#jx",
			    __func__, (void *)va, (uintmax_t)oldpte));
			vm_page_dirty(m);
		}
		if (m->md.pv_flags & PV_TABLE_REF)
			vm_page_aflag_set(m, PGA_REFERENCED);
		m->md.pv_flags &= ~PV_TABLE_REF;

		pmap_remove_entry(pmap, m, va);
	}
	return (pmap_unuse_pt(pmap, va, pde));
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *ptq;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == 0)
		return;
	ptq = pmap_pde_to_pte(pde, va);

	/*
	 * If the pte is not valid, there is nothing to do.
	 */
	if (!pte_test(ptq, PTE_V))
		return;

	(void)pmap_remove_pte(pmap, ptq, va, *pde);
	pmap_invalidate_page(pmap, va);
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	vm_offset_t va, va_next;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pmap->pm_stats.resident_count == 0)
		return;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);

	/*
	 * Special handling for removing a single page: a very common
	 * operation for which some code can be short-circuited.
	 */
	if ((sva + PAGE_SIZE) == eva) {
		pmap_remove_page(pmap, sva);
		goto out;
	}
	for (; sva < eva; sva = va_next) {
		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (*pde == NULL)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (va_next > eva)
			va_next = eva;

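		/*
		 * "va" marks the start of a run of valid mappings that
		 * still needs a TLB invalidation; va == va_next means no
		 * run is pending.  Batching the invalidations this way
		 * avoids flushing the TLB one page at a time.
		 */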
		va = va_next;
		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
		    sva += PAGE_SIZE) {
			if (!pte_test(pte, PTE_V)) {
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
				continue;
			}
			if (va == va_next)
				va = sva;
			if (pmap_remove_pte(pmap, pte, sva, *pde)) {
				sva += PAGE_SIZE;
				break;
			}
		}
		if (va != va_next)
			pmap_invalidate_range(pmap, va, sva);
	}
out:
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

void
pmap_remove_all(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pd_entry_t *pde;
	pt_entry_t *pte, tpte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);

	if (m->md.pv_flags & PV_TABLE_REF)
		vm_page_aflag_set(m, PGA_REFERENCED);

	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);

		/*
		 * If this is the last mapping, write back all caches
		 * from the page being destroyed.
		 */
		if (TAILQ_NEXT(pv, pv_list) == NULL)
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);

		pmap->pm_stats.resident_count--;

		pde = pmap_pde(pmap, pv->pv_va);
		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
		pte = pmap_pde_to_pte(pde, pv->pv_va);

		tpte = *pte;
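		/*
		 * An invalidated kernel PTE is set to PTE_G rather than
		 * zero, presumably so the global bit stays consistent
		 * across the even/odd TLB entry pair.
		 */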
		if (is_kernel_pmap(pmap))
			*pte = PTE_G;
		else
			*pte = 0;

		if (pte_test(&tpte, PTE_W))
			pmap->pm_stats.wired_count--;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (pte_test(&tpte, PTE_D)) {
			KASSERT(!pte_test(&tpte, PTE_RO),
			    ("%s: modified page not writable: va: %p, pte: %#jx",
			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
			vm_page_dirty(m);
		}
		pmap_invalidate_page(pmap, pv->pv_va);

		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		pmap_unuse_pt(pmap, pv->pv_va, *pde);
		free_pv_entry(pmap, pv);
		PMAP_UNLOCK(pmap);
	}

	vm_page_aflag_clear(m, PGA_WRITEABLE);
	m->md.pv_flags &= ~PV_TABLE_REF;
	rw_wunlock(&pvh_global_lock);
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pt_entry_t pbits, *pte;
	pd_entry_t *pde, *pdpe;
	vm_offset_t va, va_next;
	vm_paddr_t pa;
	vm_page_t m;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
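	/* Nothing to do if write permission is being retained. */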
	if (prot & VM_PROT_WRITE)
		return;

	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (*pde == NULL)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being write protected.
		 */
		if (va_next > eva)
			va_next = eva;

		va = va_next;
		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
		    sva += PAGE_SIZE) {
			pbits = *pte;
			if (!pte_test(&pbits, PTE_V) || pte_test(&pbits,
			    PTE_RO)) {
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
				continue;
			}
			pte_set(&pbits, PTE_RO);
			if (pte_test(&pbits, PTE_D)) {
				pte_clear(&pbits, PTE_D);
				if (pte_test(&pbits, PTE_MANAGED)) {
					pa = TLBLO_PTE_TO_PA(pbits);
					m = PHYS_TO_VM_PAGE(pa);
					vm_page_dirty(m);
				}
				if (va == va_next)
					va = sva;
			} else {
				/*
				 * Unless PTE_D is set, any TLB entries
				 * mapping "sva" don't allow write access, so
				 * they needn't be invalidated.
				 */
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
			}
			*pte = pbits;
		}
		if (va != va_next)
			pmap_invalidate_range(pmap, va, sva);
	}
	PMAP_UNLOCK(pmap);
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	vm_paddr_t pa, opa;
	pt_entry_t *pte;
	pt_entry_t origpte, newpte;
	pv_entry_t pv;
	vm_page_t mpte, om;

	va &= ~PAGE_MASK;
	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
	    va >= kmi.clean_eva,
	    ("pmap_enter: managed mapping within the clean submap"));
	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
	    ("pmap_enter: page %p is not busy", m));
	pa = VM_PAGE_TO_PHYS(m);
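	/*
	 * Compose the new PTE: the page frame number plus protection
	 * bits derived from "access" and "prot", with wired, global
	 * (kernel pmap), and cache-attribute bits as appropriate.
	 */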
	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
	if (wired)
		newpte |= PTE_W;
	if (is_kernel_pmap(pmap))
		newpte |= PTE_G;
	if (is_cacheable_mem(pa))
		newpte |= PTE_C_CACHE;
	else
		newpte |= PTE_C_UNCACHED;

	mpte = NULL;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		mpte = pmap_allocpte(pmap, va, M_WAITOK);
	}
	pte = pmap_pte(pmap, va);

	/*
	 * For user addresses a page table page was allocated above; a
	 * NULL pte here therefore indicates a corrupt page directory.
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
		    (void *)pmap->pm_segtab, (void *)va);
	}
	om = NULL;
	origpte = *pte;
	opa = TLBLO_PTE_TO_PA(origpte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (pte_test(&origpte, PTE_V) && opa == pa) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is
		 * wired, the PT page will be also.
		 */
		if (wired && !pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count++;
		else if (!wired && pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count--;

		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
		    ("%s: modified page not writable: va: %p, pte: %#jx",
		    __func__, (void *)va, (uintmax_t)origpte));

		/*
		 * Remove extra pte reference
		 */
		if (mpte)
			mpte->wire_count--;

		if (pte_test(&origpte, PTE_MANAGED)) {
			m->md.pv_flags |= PV_TABLE_REF;
			om = m;
			newpte |= PTE_MANAGED;
			if (!pte_test(&newpte, PTE_RO))
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}
		goto validate;
	}

	pv = NULL;

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		if (pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count--;

		if (pte_test(&origpte, PTE_MANAGED)) {
			om = PHYS_TO_VM_PAGE(opa);
			pv = pmap_pvh_remove(&om->md, pmap, va);
		}
		if (mpte != NULL) {
			mpte->wire_count--;
			KASSERT(mpte->wire_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			    " va: %p", (void *)va));
		}
	} else
		pmap->pm_stats.resident_count++;

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		m->md.pv_flags |= PV_TABLE_REF;
		if (pv == NULL)
			pv = get_pv_entry(pmap, FALSE);
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		newpte |= PTE_MANAGED;
		if (!pte_test(&newpte, PTE_RO))
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else if (pv != NULL)
		free_pv_entry(pmap, pv);

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:

#ifdef PMAP_DEBUG
	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif

	/*
	 * if the mapping or permission bits are different, we need to
	 * update the pte.
	 */
	if (origpte != newpte) {
		*pte = newpte;
		if (pte_test(&origpte, PTE_V)) {
			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
				if (om->md.pv_flags & PV_TABLE_REF)
					vm_page_aflag_set(om, PGA_REFERENCED);
				om->md.pv_flags &= ~PV_TABLE_REF;
			}
			if (pte_test(&origpte, PTE_D)) {
				KASSERT(!pte_test(&origpte, PTE_RO),
				    ("pmap_enter: modified page not writable:"
				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
				if (pte_test(&origpte, PTE_MANAGED))
					vm_page_dirty(om);
			}
			if (pte_test(&origpte, PTE_MANAGED) &&
			    TAILQ_EMPTY(&om->md.pv_list))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
			pmap_update_page(pmap, va, newpte);
		}
	}

	/*
	 * Sync I & D caches for executable pages.  Do this only if the
	 * target pmap belongs to the current process.  Otherwise, an
	 * unresolvable TLB miss may occur.
	 */
	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
	    (prot & VM_PROT_EXECUTE)) {
		mips_icache_sync_range(va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 * This code makes some *MAJOR* assumptions:
 * 1. The current pmap and the given pmap both exist.
 * 2. The mapping is not wired.
 * 3. Only read access is required.
 * 4. No page table page allocation may sleep.
 * It is, however, *MUCH* faster than pmap_enter...
 */

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pd_entry_t *pde;
		unsigned ptepindex;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = pmap_pde_pindex(va);
		if (mpte && (mpte->pindex == ptepindex)) {
			mpte->wire_count++;
		} else {
			/*
			 * Get the page directory entry
			 */
			pde = pmap_pde(pmap, va);

			/*
			 * If the page table page is mapped, we just
			 * increment the hold count, and activate it.
			 */
			if (pde && *pde != 0) {
				mpte = PHYS_TO_VM_PAGE(
				    MIPS_DIRECT_TO_PHYS(*pde));
				mpte->wire_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex,
				    M_NOWAIT);
				if (mpte == NULL)
					return (mpte);
			}
		}
	} else {
		mpte = NULL;
	}

	pte = pmap_pte(pmap, va);
	if (pte_test(pte, PTE_V)) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
		if (mpte != NULL) {
			pmap_unwire_ptp(pmap, va, mpte);
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
	if ((m->oflags & VPO_UNMANAGED) == 0)
		*pte |= PTE_MANAGED;

	if (is_cacheable_mem(pa))
		*pte |= PTE_C_CACHE;
	else
		*pte |= PTE_C_UNCACHED;

	if (is_kernel_pmap(pmap))
		*pte |= PTE_G;
	else {
		/*
		 * Sync I & D caches.  Do this only if the target pmap
		 * belongs to the current process.  Otherwise, an
		 * unresolvable TLB miss may occur.
		 */
		if (pmap == &curproc->p_vmspace->vm_pmap) {
			va &= ~PAGE_MASK;
			mips_icache_sync_range(va, PAGE_SIZE);
			mips_dcache_wbinv_range(va, PAGE_SIZE);
		}
	}
	return (mpte);
}

/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;

	if (i != 0)
		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
		    __func__);

	if (MIPS_DIRECT_MAPPABLE(pa)) {
		va = MIPS_PHYS_TO_DIRECT(pa);
	} else {
#ifndef __mips_n64    /* XXX : to be converted to new style */
		int cpu;
		register_t intr;
		struct local_sysmaps *sysm;
		pt_entry_t *pte, npte;

		/*
		 * If this is used for anything other than dumps, we may
		 * need to leave interrupts disabled on return.  If crash
		 * dumps don't work when we get to this point, we might
		 * want to consider this (leaving things disabled as a
		 * starting point ;-)
		 */
		intr = intr_disable();
		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		/* Since this is for the debugger, no locks or any other fun */
		npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V |
		    PTE_G;
		pte = pmap_pte(kernel_pmap, sysm->base);
		*pte = npte;
		sysm->valid1 = 1;
		pmap_update_page(kernel_pmap, sysm->base, npte);
		va = sysm->base;
		intr_restore(intr);
#endif
	}
	return ((void *)va);
}

void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
#ifndef __mips_n64    /* XXX : to be converted to new style */
	int cpu;
	register_t intr;
	struct local_sysmaps *sysm;
#endif

	if (MIPS_DIRECT_MAPPABLE(pa)) {
		/* nothing to do for this case */
		return;
	}
#ifndef __mips_n64    /* XXX : to be converted to new style */
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	if (sysm->valid1) {
		pt_entry_t *pte;

		intr = intr_disable();
		pte = pmap_pte(kernel_pmap, sysm->base);
		*pte = PTE_G;
		pmap_invalidate_page(kernel_pmap, sysm->base);
		intr_restore(intr);
		sysm->valid1 = 0;
	}
#endif
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pte_test(pte, PTE_W))
		pmap->pm_stats.wired_count++;
	else if (!wired && pte_test(pte, PTE_W))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	if (wired)
		pte_set(pte, PTE_W);
	else
		pte_clear(pte, PTE_W);
	PMAP_UNLOCK(pmap);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
}

/*
 *	pmap_zero_page zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

/*
 *	pmap_zero_page_area zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 *	off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((char *)(caddr_t)va + off, size);
		mips_dcache_wbinv_range(va + off, size);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((char *)va + off, size);
		mips_dcache_wbinv_range(va + off, size);
		pmap_lmem_unmap();
	}
}

void
pmap_zero_page_idle(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 *
 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	vm_offset_t va_src, va_dst;
	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);

	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
		/* easy case, all can be accessed via KSEG0 */
		/*
		 * Flush all caches for VA that are mapped to this page
		 * to make sure that data in SDRAM is up to date
		 */
		pmap_flush_pvcache(src);
		mips_dcache_wbinv_range_index(
		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
	} else {
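		/*
		 * At least one of the pages lies outside the direct
		 * map; copy through a temporary low-memory mapping
		 * window instead.
		 */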
		va_src = pmap_lmem_map2(phys_src, phys_dst);
		va_dst = va_src + PAGE_SIZE;
		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

int unmapped_buf_allowed;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	char *a_cp, *b_cp;
	vm_page_t a_m, b_m;
	vm_offset_t a_pg_offset, b_pg_offset;
	vm_paddr_t a_phys, b_phys;
	int cnt;

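	/*
	 * Process the transfer one segment at a time, where a segment
	 * ends at whichever of the two page boundaries comes first.
	 * Each pair of pages is copied via the direct map when both
	 * allow it and via a temporary low-memory mapping window
	 * otherwise.
	 */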
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_m = ma[a_offset >> PAGE_SHIFT];
		a_phys = VM_PAGE_TO_PHYS(a_m);
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_m = mb[b_offset >> PAGE_SHIFT];
		b_phys = VM_PAGE_TO_PHYS(b_m);
		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
		    MIPS_DIRECT_MAPPABLE(b_phys)) {
			pmap_flush_pvcache(a_m);
			mips_dcache_wbinv_range_index(
			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
			    a_pg_offset;
			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
			    b_pg_offset;
			bcopy(a_cp, b_cp, cnt);
			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
		} else {
			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
			b_cp = (char *)a_cp + PAGE_SIZE;
			a_cp += a_pg_offset;
			b_cp += b_pg_offset;
			bcopy(a_cp, b_cp, cnt);
			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
			pmap_lmem_unmap();
		}
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (PV_PMAP(pv) == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Remove all pages from the specified address space; this aids process
 * exit speed.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode
 * enabled.  This is much faster than pmap_remove in the case of running
 * down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t *pde;
	pt_entry_t *pte, tpte;
	pv_entry_t pv;
	vm_page_t m;
	struct pv_chunk *pc, *npc;
	u_long inuse, bitmask;
	int allfree, bit, field, idx;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
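	/*
	 * Walk this pmap's pv chunks directly rather than the per-page
	 * pv lists; every in-use entry in a chunk corresponds to one of
	 * the pmap's mappings.
	 */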
	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
		allfree = 1;
		for (field = 0; field < _NPCM; field++) {
			inuse = ~pc->pc_map[field] & pc_freemask[field];
			while (inuse != 0) {
				bit = ffsl(inuse) - 1;
				bitmask = 1UL << bit;
				idx = field * sizeof(inuse) * NBBY + bit;
				pv = &pc->pc_pventry[idx];
				inuse &= ~bitmask;

				pde = pmap_pde(pmap, pv->pv_va);
				KASSERT(pde != NULL && *pde != 0,
				    ("pmap_remove_pages: pde"));
				pte = pmap_pde_to_pte(pde, pv->pv_va);
				if (!pte_test(pte, PTE_V))
					panic("pmap_remove_pages: bad pte");
				tpte = *pte;

/*
 * We cannot remove wired pages from a process' mapping at this time
 */
				if (pte_test(&tpte, PTE_W)) {
					allfree = 0;
					continue;
				}
				*pte = is_kernel_pmap(pmap) ? PTE_G : 0;

				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
				KASSERT(m != NULL,
				    ("pmap_remove_pages: bad tpte %#jx",
				    (uintmax_t)tpte));

				/*
				 * Update the vm_page_t clean and reference bits.
				 */
				if (pte_test(&tpte, PTE_D))
					vm_page_dirty(m);

				/* Mark free */
				PV_STAT(pv_entry_frees++);
				PV_STAT(pv_entry_spare++);
				pv_entry_count--;
				pc->pc_map[field] |= bitmask;
				pmap->pm_stats.resident_count--;
				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
				if (TAILQ_EMPTY(&m->md.pv_list))
					vm_page_aflag_clear(m, PGA_WRITEABLE);
				pmap_unuse_pt(pmap, pv->pv_va, *pde);
			}
		}
		if (allfree) {
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			free_pv_chunk(pc);
		}
	}
	pmap_invalidate_all(pmap);
	PMAP_UNLOCK(pmap);
	rw_wunlock(&pvh_global_lock);
}

/*
 * pmap_testbit tests bits in PTEs
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;
	boolean_t rv = FALSE;

	if (m->oflags & VPO_UNMANAGED)
		return (rv);

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		rv = pte_test(pte, bit);
		PMAP_UNLOCK(pmap);
		if (rv)
			break;
	}
	return (rv);
}

/*
 *	pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		if (pte_test(pte, PTE_W))
			count++;
		PMAP_UNLOCK(pmap);
	}
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pmap_t pmap;
	pt_entry_t pbits, *pte;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		KASSERT(pte != NULL && pte_test(pte, PTE_V),
		    ("page on pv_list has no pte"));
		pbits = *pte;
		if (pte_test(&pbits, PTE_D)) {
			pte_clear(&pbits, PTE_D);
			vm_page_dirty(m);
		}
		pte_set(&pbits, PTE_RO);
		if (pbits != *pte) {
			*pte = pbits;
			pmap_update_page(pmap, pv->pv_va, pbits);
		}
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 *	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
2851178172Simp	if (m->md.pv_flags & PV_TABLE_REF) {
2852239317Salc		rw_wlock(&pvh_global_lock);
2853178172Simp		m->md.pv_flags &= ~PV_TABLE_REF;
2854239317Salc		rw_wunlock(&pvh_global_lock);
2855208990Salc		return (1);
2856178172Simp	}
2857208990Salc	return (0);
2858178172Simp}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_D set.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	rw_wlock(&pvh_global_lock);
	rv = pmap_testbit(m, PTE_D);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/* N/C */

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, addr);
	if (pde != NULL && *pde != 0) {
		pte = pmap_pde_to_pte(pde, addr);
		rv = (*pte == 0);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}
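
/*
 * A minimal sketch of how a fault handler might use the test above
 * (hypothetical caller, not part of this file): prefaulting is cheap
 * only when the page table page already exists and the slot is empty,
 * so nothing needs to be allocated and no existing mapping is
 * overwritten:
 *
 *	if (pmap_is_prefaultable(pmap, va))
 *		pmap_enter_quick(pmap, va, m, prot);
 */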

/*
 *	Apply the given advice to the specified range of addresses within the
 *	given pmap.  Depending on the advice, clear the referenced and/or
 *	modified flags in each mapping and set the mapped page's dirty field.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	vm_offset_t va, va_next;
	vm_paddr_t pa;
	vm_page_t m;

	if (advice != MADV_DONTNEED && advice != MADV_FREE)
		return;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (*pde == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being advised.
		 */
		if (va_next > eva)
			va_next = eva;

		va = va_next;
		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
		    sva += PAGE_SIZE) {
			if (!pte_test(pte, PTE_MANAGED | PTE_V)) {
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
				continue;
			}
			pa = TLBLO_PTE_TO_PA(*pte);
			m = PHYS_TO_VM_PAGE(pa);
			m->md.pv_flags &= ~PV_TABLE_REF;
			if (pte_test(pte, PTE_D)) {
				if (advice == MADV_DONTNEED) {
					/*
					 * Future calls to pmap_is_modified()
					 * can be avoided by making the page
					 * dirty now.
					 */
					vm_page_dirty(m);
				} else {
					pte_clear(pte, PTE_D);
					if (va == va_next)
						va = sva;
				}
			} else {
				/*
				 * Unless PTE_D is set, any TLB entries
				 * mapping "sva" don't allow write access, so
				 * they needn't be invalidated.
				 */
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
			}
		}
		if (va != va_next)
			pmap_invalidate_range(pmap, va, sva);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
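
/*
 * A rough sketch of how control reaches pmap_advise() (hypothetical
 * user-level code, not part of this file): madvise(2) is translated by
 * the VM map layer into a call on the owning pmap:
 *
 *	madvise(addr, len, MADV_DONTNEED);	// keep the pages, drop
 *						// reference/modify state
 *	madvise(addr, len, MADV_FREE);		// contents may be discarded
 */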

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_t pmap;
	pt_entry_t *pte;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		if (pte_test(pte, PTE_D)) {
			pte_clear(pte, PTE_D);
			pmap_update_page(pmap, pv->pv_va, *pte);
		}
		PMAP_UNLOCK(pmap);
	}
	rw_wunlock(&pvh_global_lock);
}
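
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * after cleaning a page, reset dirty tracking so the next store must
 * set PTE_D again before pmap_is_modified() reports the page dirty:
 *
 *	if (pmap_is_modified(m)) {
 *		// ... write the page to its backing store ...
 *		pmap_clear_modify(m);
 *	}
 */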

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}

/*
 * Miscellaneous support routines follow
 */

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * KSEG1 maps only the first 512M of the physical address space.
	 * For pa > 0x20000000 we must build a proper mapping using
	 * pmap_kenter().
	 */
	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
	else {
		offset = pa & PAGE_MASK;
		size = roundup(size + offset, PAGE_SIZE);

		va = kva_alloc(size);
		if (!va)
			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
		pa = trunc_page(pa);
		for (tmpva = va; size > 0;) {
			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
			size -= PAGE_SIZE;
			tmpva += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
#ifndef __mips_n64
	vm_offset_t base, offset;

	/* If the address is within KSEG1 then there is nothing to do */
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);
	kva_free(base, size);
#endif
}
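
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file); the physical address and window size are made-up examples:
 *
 *	volatile uint32_t *regs;
 *	uint32_t val;
 *
 *	regs = (volatile uint32_t *)pmap_mapdev(0x1f000000, 0x1000);
 *	val = regs[0];			// uncached device register read
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 */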

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? *ptep : 0;
	if (!pte_test(&pte, PTE_V)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte_test(&pte, PTE_D))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	pa = TLBLO_PTE_TO_PA(pte);
	if (pte_test(&pte, PTE_MANAGED)) {
		/*
		 * This may falsely report the given address as
		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
		 * per-PTE reference information, it is impossible to
		 * determine if the address is MINCORE_REFERENCED.
		 */
		m = PHYS_TO_VM_PAGE(pa);
		if ((m->aflags & PGA_REFERENCED) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    pte_test(&pte, PTE_MANAGED)) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}
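
/*
 * A minimal sketch of the user-visible side (hypothetical code, not
 * part of this file): mincore(2) stores one status byte per page, and
 * the flags computed above end up in that byte:
 *
 *	char vec[1];
 *
 *	if (mincore(addr, PAGE_SIZE, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE) != 0)
 *		;	// the page is resident
 */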

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	struct proc *p = td->td_proc;
	u_int cpuid;

	critical_enter();

	pmap = vmspace_pmap(p->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);

	if (oldpmap)
		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	pmap_asid_alloc(pmap);
	if (td == curthread) {
		PCPU_SET(segbase, pmap->pm_segtab);
		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
	}

	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBSEG)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & SEGMASK;
	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
	    (*addr & SEGMASK) == superpage_offset)
		return;
	if ((*addr & SEGMASK) < superpage_offset)
		*addr = (*addr & ~SEGMASK) + superpage_offset;
	else
		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
}
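
/*
 * A worked example of the alignment above, assuming 4K pages and a 4MB
 * segment (NBSEG == 0x400000, SEGMASK == 0x3fffff); the numbers are
 * made up for illustration:
 *
 *	offset = 0x00123000	->	superpage_offset = 0x123000
 *	*addr  = 0x20400000	->	(*addr & SEGMASK) = 0 < 0x123000
 *
 * so *addr becomes 0x20523000: *addr and offset are then congruent
 * modulo NBSEG, and whole segments of the mapping can be promoted.
 */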

#ifdef DDB
DB_SHOW_COMMAND(ptable, ddb_pid_dump)
{
	pmap_t pmap;
	struct thread *td = NULL;
	struct proc *p;
	int i, j, k;
	vm_paddr_t pa;
	vm_offset_t va;

	if (have_addr) {
		td = db_lookup_thread(addr, TRUE);
		if (td == NULL) {
			db_printf("Invalid pid or tid");
			return;
		}
		p = td->td_proc;
		if (p->p_vmspace == NULL) {
			db_printf("No vmspace for process");
			return;
		}
		pmap = vmspace_pmap(p->p_vmspace);
	} else
		pmap = kernel_pmap;

	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
	    pmap->pm_asid[0].gen);
	for (i = 0; i < NPDEPG; i++) {
		pd_entry_t *pdpe;
		pt_entry_t *pde;
		pt_entry_t pte;

		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
		if (pdpe == NULL)
			continue;
		db_printf("[%4d] %p\n", i, pdpe);
#ifdef __mips_n64
		for (j = 0; j < NPDEPG; j++) {
			pde = (pt_entry_t *)pdpe[j];
			if (pde == NULL)
				continue;
			db_printf("\t[%4d] %p\n", j, pde);
#else
		{
			j = 0;
			pde = (pt_entry_t *)pdpe;
#endif
			for (k = 0; k < NPTEPG; k++) {
				pte = pde[k];
				if (pte == 0 || !pte_test(&pte, PTE_V))
					continue;
				pa = TLBLO_PTE_TO_PA(pte);
				va = ((u_long)i << SEGSHIFT) |
				    (j << PDRSHIFT) | (k << PAGE_SHIFT);
				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
				    k, (void *)va, (uintmax_t)pte,
				    (uintmax_t)pa);
			}
		}
	}
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_offset_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	vm_offset_t va;
	unsigned i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPG; i++)
		if (pm->pm_segtab[i])
			for (j = 0; j < NPTEPG; j++) {
				va = ((vm_offset_t)i << SEGSHIFT) +
				    ((vm_offset_t)j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap &&
				    va >= VM_MAXUSER_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pte_test(ptep, PTE_V))
					printf("%jx:%jx ", (uintmax_t)va,
					    (uintmax_t)*ptep);
			}
}

void
pmap_pvdump(vm_offset_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %jx", (uintmax_t)pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %jx", (void *)pmap,
		    (uintmax_t)pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}

/* N/C */
#endif

/*
 * Allocate a TLB address space tag (called ASID or TLBPID) for the given
 * pmap.  It takes almost as much or more time to search the TLB for a
 * specific ASID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new ASID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. ASID zero is reserved for kernel use.
 */
static void
pmap_asid_alloc(pmap_t pmap)
{

	if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED &&
	    pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation))
		return;
	if (PCPU_GET(next_asid) == pmap_max_asid) {
		tlb_invalidate_all_user(NULL);
		PCPU_SET(asid_generation,
		    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
		if (PCPU_GET(asid_generation) == 0)
			PCPU_SET(asid_generation, 1);
		PCPU_SET(next_asid, 1);	/* 0 means invalid */
	}
	pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
	pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
	PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
}
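
/*
 * A worked example of the rollover above, assuming pmap_max_asid is 256
 * (a made-up value for illustration): after ASIDs 1..255 have been
 * handed out, next_asid reaches 256, so the user TLB entries are
 * flushed, asid_generation is bumped, and next_asid restarts at 1.
 * Every pmap whose pm_asid[].gen still holds the old generation is now
 * stale and receives a fresh ASID the next time it is activated.
 */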

static pt_entry_t
init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
{
	pt_entry_t rw;

	if (!(prot & VM_PROT_WRITE))
		rw = PTE_V | PTE_RO;
	else if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((access & VM_PROT_WRITE) != 0)
			rw = PTE_V | PTE_D;
		else
			rw = PTE_V;
	} else
		/* Needn't emulate a modified bit for unmanaged pages. */
		rw = PTE_V | PTE_D;
	return (rw);
}
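
/*
 * The cases above, summarized (on MIPS, PTE_D doubles as the "writable
 * and already dirty" bit):
 *
 *	read-only protection		-> PTE_V | PTE_RO
 *	managed page, write access	-> PTE_V | PTE_D  (pre-dirtied)
 *	managed page, read access	-> PTE_V          (the first store
 *					   traps so the dirty bit can be
 *					   recorded; see
 *					   pmap_emulate_modified())
 *	unmanaged page, writable	-> PTE_V | PTE_D
 */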

/*
 * pmap_emulate_modified : do dirty bit emulation
 *
 * On SMP, update just the local TLB, other CPUs will update their
 * TLBs from PTE lazily, if they get the exception.
 * Returns 0 in case of success, 1 if the page is read only and we
 * need to fault.
 */
int
pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte == NULL)
		panic("pmap_emulate_modified: can't find PTE");
#ifdef SMP
	/* It is possible that some other CPU changed m-bit */
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
		tlb_update(pmap, va, *pte);
		PMAP_UNLOCK(pmap);
		return (0);
	}
#else
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
		panic("pmap_emulate_modified: invalid pte");
#endif
	if (pte_test(pte, PTE_RO)) {
		PMAP_UNLOCK(pmap);
		return (1);
	}
	pte_set(pte, PTE_D);
	tlb_update(pmap, va, *pte);
	if (!pte_test(pte, PTE_MANAGED))
		panic("pmap_emulate_modified: unmanaged page");
	PMAP_UNLOCK(pmap);
	return (0);
}
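
/*
 * A rough sketch of the caller's side (the trap handler; simplified,
 * not the literal code): a store through a clean mapping raises a TLB
 * modified exception, which is resolved here without a full VM fault:
 *
 *	if (pmap_emulate_modified(pmap, badvaddr) == 0)
 *		return;		// PTE_D is now set, retry the store
 *	// otherwise the page really is read-only: take the slow
 *	// vm_fault() path and possibly deliver SIGSEGV
 */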

/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with the
 *		given virtual address.
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	int mapped;

	/*
	 * First, the direct-mapped regions.
	 */
#if defined(__mips_n64)
	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
		return (MIPS_XKPHYS_TO_PHYS(va));
#endif
	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
		return (MIPS_KSEG0_TO_PHYS(va));

	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
		return (MIPS_KSEG1_TO_PHYS(va));

	/*
	 * User virtual addresses.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pt_entry_t *ptep;

		if (curproc && curproc->p_vmspace) {
			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
			return (0);
		}
	}

	/*
	 * Should be kernel virtual here, otherwise fail
	 */
	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
#if defined(__mips_n64)
	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
#endif
	/*
	 * Kernel virtual.
	 */

	if (mapped) {
		pt_entry_t *ptep;

		/* Is the kernel pmap initialized? */
		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
			/* It's inside the virtual address range */
			ptep = pmap_pte(kernel_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
		}
		return (0);
	}

	panic("%s for unknown address space %p.", __func__, (void *)va);
}
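
/*
 * A worked example of the direct-map cases above (addresses assume the
 * standard 32-bit MIPS layout): KSEG0 starts at 0x80000000 and maps
 * physical memory one-to-one, so
 *
 *	pmap_kextract((vm_offset_t)0x80004000) == 0x4000
 *
 * and the same physical page appears uncached in KSEG1 at 0xa0004000.
 */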

void
pmap_flush_pvcache(vm_page_t m)
{
	pv_entry_t pv;

	if (m != NULL) {
		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
		    pv = TAILQ_NEXT(pv, pv_list)) {
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
		}
	}
}