/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2011 Semihalf
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/arm/arm/pmap.c,v 1.113 2009/07/24 13:50:29
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG           - Build in pmap_debug_level code
 *
 * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c
 */
/* Include header files */

#include "opt_vm.h"
#include "opt_pmap.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#include <machine/md_var.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef DEBUG
extern int last_fault_code;
#endif

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
                ((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif  /* PMAP_DEBUG */

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
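/*
 * pv_table (sized in pmap_init()) holds one struct md_page per potential
 * 1MB superpage frame of physical memory; pa_to_pvh() returns the entry
 * covering the superpage that contains 'pa'.
 */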

#ifdef ARM_L2_PIPT
#define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range((pa), (size))
#define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range((pa), (size))
#else
#define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range((va), (size))
#define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range((va), (size))
#endif
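/*
 * With a PIPT (physically indexed, physically tagged) outer L2 cache the
 * maintenance operations take the physical address of the range; with a
 * virtually indexed L2 they take the virtual address.  The macros above
 * select the appropriate argument at compile time.
 */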

extern struct pv_addr systempage;

/*
 * Internal function prototypes
 */

static PMAP_INLINE
struct pv_entry		*pmap_find_pv(struct md_page *, pmap_t, vm_offset_t);
static void		pmap_free_pv_chunk(struct pv_chunk *pc);
static void		pmap_free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t	pmap_get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t	pmap_pv_reclaim(pmap_t locked_pmap);
static boolean_t	pmap_pv_insert_section(pmap_t, vm_offset_t,
    vm_paddr_t);
static struct pv_entry	*pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
static int		pmap_pvh_wired_mappings(struct md_page *, int);

static int		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int);
static vm_paddr_t	pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);

static void		pmap_map_section(pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t, boolean_t);
static void		pmap_promote_section(pmap_t, vm_offset_t);
static boolean_t	pmap_demote_section(pmap_t, vm_offset_t);
static boolean_t	pmap_enter_section(pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static void		pmap_remove_section(pmap_t, vm_offset_t);

static int		pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t	kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;
vm_paddr_t kernel_l1pa;

vm_offset_t kernel_vm_end = 0;

vm_offset_t vm_max_kernel_address;

struct pmap kernel_pmap_store;

/*
 * Resources for quickly copying and zeroing pages using virtual address space
 * and page table entries that are pre-allocated per-CPU by pmap_init().
 */
struct czpages {
	struct	mtx	lock;
	pt_entry_t	*srcptep;
	pt_entry_t	*dstptep;
	vm_offset_t	srcva;
	vm_offset_t	dstva;
};
static struct czpages cpu_czpages[MAXCPU];

static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */
static void pmap_set_prot(pt_entry_t *pte, vm_prot_t prot, uint8_t user);
pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;

struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	vm_paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
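/*
 * Example: with 1MB section granularity (L1_S_SHIFT == 20), a virtual
 * address of 0xc0123456 gives L1_IDX() == 0xc01, i.e. entry 3073 of the
 * 4096-entry L1 table.
 */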

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		vm_paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE	0x1
#define KENTER_DEVICE	0x2
#define KENTER_USER	0x4

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))
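/*
 * Example (assuming L2_BUCKET_LOG2 == 4, so L2_BUCKET_SIZE == 16): an L1
 * index of 0xc01 selects bucket L2_BUCKET(0xc01) == 1 within the l2_dtable
 * chosen by L2_IDX(0xc01); each l2_dtable thus spans 16 consecutive L1
 * slots, i.e. 16MB of VA space.
 */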

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
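/*
 * Example: L2_NEXT_BUCKET(0xc0123456) == 0xc0200000, the first VA served
 * by the following L2 descriptor table (the next 1MB boundary).
 */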

/*
 * We try to map the page tables write-through, if possible.  However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible.  However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PTE_BEEN_EXECD(pte)  (L2_S_EXECUTABLE(pte) && L2_S_REFERENCED(pte))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PTE_BEEN_REFD(pte)   (L2_S_REFERENCED(pte))

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)	((pm) == pmap_kernel() || \
            curproc->p_vmspace->vm_map.pmap == (pm))

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count, pv_entry_max, pv_entry_high_water;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
int pv_maxchunks;			/* How many chunks we have KVA for */
vm_offset_t pv_vafree;			/* Freelist stored in the PTE */

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
CTASSERT(_NPCM == 8);
CTASSERT(_NPCPV == 252);

#define	PC_FREE0_6	0xfffffffful	/* Free values for index 0 through 6 */
#define	PC_FREE7	0x0ffffffful	/* Free values for index 7 */
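/*
 * Eight 32-bit words provide 256 free bits per chunk, but a chunk only
 * holds _NPCPV == 252 pv entries (252 == 7 * 32 + 28), so only the low
 * 28 bits of the last word are valid: PC_FREE7 == 0x0fffffff.
 */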

static const uint32_t pc_freemask[_NPCM] = {
	PC_FREE0_6, PC_FREE0_6, PC_FREE0_6,
	PC_FREE0_6, PC_FREE0_6, PC_FREE0_6,
	PC_FREE0_6, PC_FREE7
};

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

/* Superpages utilization enabled = 1 / disabled = 0 */
static int sp_enabled = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN, &sp_enabled, 0,
    "Are large page mappings enabled?");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
    "Current number of pv entries");

#ifdef PV_STATS
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
    "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
    "Current number of pv entry chunks frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
    "Number of times tried to get a chunk page but failed.");

static long pv_entry_frees, pv_entry_allocs;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
    "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
    "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct rwlock pvh_global_lock;

int l1_mem_types[] = {
	ARM_L1S_STRONG_ORD,
	ARM_L1S_DEVICE_NOSHARE,
	ARM_L1S_DEVICE_SHARE,
	ARM_L1S_NRML_NOCACHE,
	ARM_L1S_NRML_IWT_OWT,
	ARM_L1S_NRML_IWB_OWB,
	ARM_L1S_NRML_IWBA_OWBA
};

int l2l_mem_types[] = {
	ARM_L2L_STRONG_ORD,
	ARM_L2L_DEVICE_NOSHARE,
	ARM_L2L_DEVICE_SHARE,
	ARM_L2L_NRML_NOCACHE,
	ARM_L2L_NRML_IWT_OWT,
	ARM_L2L_NRML_IWB_OWB,
	ARM_L2L_NRML_IWBA_OWBA
};

int l2s_mem_types[] = {
	ARM_L2S_STRONG_ORD,
	ARM_L2S_DEVICE_NOSHARE,
	ARM_L2S_DEVICE_SHARE,
	ARM_L2S_NRML_NOCACHE,
	ARM_L2S_NRML_IWT_OWT,
	ARM_L2S_NRML_IWB_OWB,
	ARM_L2S_NRML_IWBA_OWBA
};
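/*
 * The three arrays above are indexed by the PTE_* memory-type constants
 * (e.g. PTE_CACHE and PTE_PAGETABLE, as used by pmap_pte_init_mmu_v6()
 * below) and hold the matching attribute bits for L1 section, L2 large
 * and L2 small descriptors respectively.
 */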

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (l1pt != pmap_kernel()->pm_l1->l1_kva)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
		if (pv->pv_pa == pa)
			return (pv->pv_va);
	}
	return (0);
}

void
pmap_pte_init_mmu_v6(void)
{

	if (PTE_PAGETABLE >= 3)
		pmap_needs_pte_sync = 1;
	pte_l1_s_cache_mode = l1_mem_types[PTE_CACHE];
	pte_l2_l_cache_mode = l2l_mem_types[PTE_CACHE];
	pte_l2_s_cache_mode = l2s_mem_types[PTE_CACHE];

	pte_l1_s_cache_mode_pt = l1_mem_types[PTE_PAGETABLE];
	pte_l2_l_cache_mode_pt = l2l_mem_types[PTE_PAGETABLE];
	pte_l2_s_cache_mode_pt = l2s_mem_types[PTE_PAGETABLE];

}

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pmap)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	mtx_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pmap->pm_l1 = l1;
	pmap->pm_domain = domain + 1;
}

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pmap)
{
	struct l1_ttable *l1 = pmap->pm_l1;

	mtx_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pmap->pm_domain - 1] = l1->l1_domain_first;
	l1->l1_domain_first = pmap->pm_domain - 1;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0)
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pmap, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pmap, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	PMAP_ASSERT_LOCKED(pmap);
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
			return (NULL);
		}
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
		if (pmap->pm_l2[L2_IDX(l1idx)] != NULL) {
			/*
			 * Someone already allocated the l2_dtable while
			 * we were doing the same.
			 */
			uma_zfree(l2table_zone, l2);
			l2 = pmap->pm_l2[L2_IDX(l1idx)];
		} else {
			bzero(l2, sizeof(*l2));
			/*
			 * Link it into the parent pmap
			 */
			pmap->pm_l2[L2_IDX(l1idx)] = l2;
		}
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		ptep = uma_zalloc(l2zone, M_NOWAIT);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
		if (l2b->l2b_kva != 0) {
			/* We lost the race. */
			uma_zfree(l2zone, ptep);
			return (l2b);
		}
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pmap->pm_l2[L2_IDX(l1idx)] = NULL;
				uma_zfree(l2table_zone, l2);
			}
			return (NULL);
		}
		l2b->l2b_phys = vtophys(ptep);

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}

static PMAP_INLINE void
pmap_free_l2_ptp(pt_entry_t *l2)
{
	uma_zfree(l2zone, l2);
}
/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pmap, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;


	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pmap == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pmap->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pmap->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
		cpu_tlb_flushD_SE((vm_offset_t)ptep);
		cpu_cpwait();
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
	pmap_free_l2_ptp(ptep);

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pmap->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pmap->pm_l2[L2_IDX(l1idx)] = NULL;
	uma_zfree(l2table_zone, l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter() by the pool subsystem. Therefore, the cache-
	 * mode will not be right for page table mappings. To avoid
	 * polluting the pmap_kenter() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;

	cpu_idcache_wbinv_range(va, PAGE_SIZE);
	pmap_l2cache_wbinv_range(va, pte & L2_S_FRAME, PAGE_SIZE);
	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
		/*
		 * Page tables must have the cache-mode set to
		 * Write-Thru.
		 */
		*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
		PTE_SYNC(ptep);
		cpu_tlb_flushD_SE(va);
		cpu_cpwait();
	}

	memset(mem, 0, L2_TABLE_SIZE_REAL);
	return (0);
}

/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *m, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv, *pve, *next_pv;
	struct md_page *pvh;
	pd_entry_t *pl1pd;
	pt_entry_t *ptep, npte, opte;
	pmap_t pmap;
	vm_offset_t va;
	u_int oflags;
	int count = 0;

	rw_wlock(&pvh_global_lock);
	if ((m->flags & PG_FICTITIOUS) != 0)
		goto small_mappings;

	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
		va = pv->pv_va;
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
		KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
		    ("pmap_clearbit: valid section mapping expected"));
		if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_WRITE))
			(void)pmap_demote_section(pmap, va);
		else if ((maskbits & PVF_REF) && L1_S_REFERENCED(*pl1pd)) {
			if (pmap_demote_section(pmap, va)) {
				if ((pv->pv_flags & PVF_WIRED) == 0) {
					/*
					 * Remove the mapping to a single page
					 * so that a subsequent access may
					 * repromote. Since the underlying
					 * l2_bucket is fully populated, this
					 * removal never frees an entire
					 * l2_bucket.
					 */
					va += (VM_PAGE_TO_PHYS(m) &
					    L1_S_OFFSET);
					l2b = pmap_get_l2_bucket(pmap, va);
					KASSERT(l2b != NULL,
					    ("pmap_clearbit: no l2 bucket for "
					     "va 0x%#x, pmap 0x%p", va, pmap));
					ptep = &l2b->l2b_kva[l2pte_index(va)];
					*ptep = 0;
					PTE_SYNC(ptep);
					pmap_free_l2_bucket(pmap, l2b, 1);
					pve = pmap_remove_pv(m, pmap, va);
					KASSERT(pve != NULL, ("pmap_clearbit: "
					    "no PV entry for managed mapping"));
					pmap_free_pv_entry(pmap, pve);

				}
			}
		} else if ((maskbits & PVF_MOD) && L1_S_WRITABLE(*pl1pd)) {
			if (pmap_demote_section(pmap, va)) {
				if ((pv->pv_flags & PVF_WIRED) == 0) {
					/*
					 * Write protect the mapping to a
					 * single page so that a subsequent
					 * write access may repromote.
					 */
					va += (VM_PAGE_TO_PHYS(m) &
					    L1_S_OFFSET);
					l2b = pmap_get_l2_bucket(pmap, va);
					KASSERT(l2b != NULL,
					    ("pmap_clearbit: no l2 bucket for "
					     "va 0x%#x, pmap 0x%p", va, pmap));
					ptep = &l2b->l2b_kva[l2pte_index(va)];
					if ((*ptep & L2_S_PROTO) != 0) {
						pve = pmap_find_pv(&m->md,
						    pmap, va);
						KASSERT(pve != NULL,
						    ("pmap_clearbit: no PV "
						    "entry for managed mapping"));
						pve->pv_flags &= ~PVF_WRITE;
						*ptep |= L2_APX;
						PTE_SYNC(ptep);
					}
				}
			}
		}
		PMAP_UNLOCK(pmap);
	}

small_mappings:
	if (TAILQ_EMPTY(&m->md.pv_list)) {
		rw_wunlock(&pvh_global_lock);
		return (0);
	}

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		va = pv->pv_va;
		pmap = PV_PMAP(pv);
		oflags = pv->pv_flags;
		pv->pv_flags &= ~maskbits;

		PMAP_LOCK(pmap);

		l2b = pmap_get_l2_bucket(pmap, va);
		KASSERT(l2b != NULL, ("pmap_clearbit: no l2 bucket for "
		    "va 0x%#x, pmap 0x%p", va, pmap));

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		if (maskbits & (PVF_WRITE | PVF_MOD)) {
			/* make the pte read only */
			npte |= L2_APX;
		}

		if (maskbits & PVF_REF) {
			/*
			 * Clear referenced flag in PTE so that we
			 * will take a flag fault the next time the mapping
			 * is referenced.
			 */
			npte &= ~L2_S_REF;
		}

		CTR4(KTR_PMAP,"clearbit: pmap:%p bits:%x pte:%x->%x",
		    pmap, maskbits, opte, npte);
		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PTE_BEEN_EXECD(opte))
				cpu_tlb_flushID_SE(pv->pv_va);
			else if (PTE_BEEN_REFD(opte))
				cpu_tlb_flushD_SE(pv->pv_va);
			cpu_cpwait();
		}

		PMAP_UNLOCK(pmap);

	}

	if (maskbits & PVF_WRITE)
		vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page's PV list
 *
 * => caller should hold the proper lock on pvh_global_lock
 * => caller should have pmap locked
 * => we will (someday) gain the lock on the vm_page's PV list
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *m, struct pv_entry *pve, pmap_t pmap,
    vm_offset_t va, u_int flags)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	PMAP_ASSERT_LOCKED(pmap);
	pve->pv_va = va;
	pve->pv_flags = flags;

	TAILQ_INSERT_HEAD(&m->md.pv_list, pve, pv_list);
	if (pve->pv_flags & PVF_WIRED)
		++pmap->pm_stats.wired_count;
}

/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct md_page *md, pmap_t pmap, vm_offset_t va)
{
	struct pv_entry *pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	TAILQ_FOREACH(pv, &md->pv_list, pv_list)
		if (pmap == PV_PMAP(pv) && va == pv->pv_va)
			break;

	return (pv);
}

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;

	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);

	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
	/*
	 * Set the referenced flag: the vector page should always be
	 * allowed to reside in the TLB.
	 */
	*ptep |= L2_S_REF;

	pmap_set_prot(ptep, prot|VM_PROT_EXECUTE, 0);
	PTE_SYNC(ptep);
	cpu_tlb_flushID_SE(vector_page);
	cpu_cpwait();
}

static void
pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
{

	*ptep &= ~(L2_S_PROT_MASK | L2_XN);

	if (!(prot & VM_PROT_EXECUTE))
		*ptep |= L2_XN;

	/* Set defaults first - kernel read access */
	*ptep |= L2_APX;
	*ptep |= L2_S_PROT_R;
	/* Now tune APs as desired */
	if (user)
		*ptep |= L2_S_PROT_U;

	if (prot & VM_PROT_WRITE)
		*ptep &= ~(L2_APX);
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */
static struct pv_entry *
pmap_remove_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
{
	struct pv_entry *pve;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_ASSERT_LOCKED(pmap);

	pve = pmap_find_pv(&m->md, pmap, va);	/* find corresponding pve */
	if (pve != NULL) {
		TAILQ_REMOVE(&m->md.pv_list, pve, pv_list);
		if (pve->pv_flags & PVF_WIRED)
			--pmap->pm_stats.wired_count;
	}
	if (TAILQ_EMPTY(&m->md.pv_list))
		vm_page_aflag_clear(m, PGA_WRITEABLE);

	return(pve);				/* return removed pve */
}

/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	PMAP_ASSERT_LOCKED(pmap);
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if ((npv = pmap_find_pv(&m->md, pmap, va)) == NULL)
		return (0);

	/*
	 * There is at least one VA mapping this page.
	 */
	oflags = npv->pv_flags;
	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

	if ((flags ^ oflags) & PVF_WIRED) {
		if (flags & PVF_WIRED)
			++pmap->pm_stats.wired_count;
		else
			--pmap->pm_stats.wired_count;
	}

	return (oflags);
}

/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif  /* PMAP_DEBUG */

void
pmap_pinit0(struct pmap *pmap)
{
	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));

	bcopy(kernel_pmap, pmap, sizeof(*pmap));
	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
	PMAP_LOCK_INIT(pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
}

/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
}

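/*
 * The pv chunk KVA allocator below threads its freelist through the page
 * table itself: the PTE slot of each free VA stores the VA of the next
 * free page.  Because those VAs are page aligned, their low type bits
 * read as L2_TYPE_INV, so a freelist link can never be mistaken for a
 * valid mapping.
 */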
1275250634Sgberstatic vm_offset_t
1276250634Sgberpmap_ptelist_alloc(vm_offset_t *head)
1277250634Sgber{
1278250634Sgber	pt_entry_t *pte;
1279250634Sgber	vm_offset_t va;
1280250634Sgber
1281250634Sgber	va = *head;
1282250634Sgber	if (va == 0)
1283250634Sgber		return (va);	/* Out of memory */
1284250634Sgber	pte = vtopte(va);
1285250634Sgber	*head = *pte;
1286250634Sgber	if ((*head & L2_TYPE_MASK) != L2_TYPE_INV)
1287250634Sgber		panic("%s: va is not L2_TYPE_INV!", __func__);
1288250634Sgber	*pte = 0;
1289250634Sgber	return (va);
1290250634Sgber}
1291250634Sgber
1292250634Sgberstatic void
1293250634Sgberpmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
1294250634Sgber{
1295250634Sgber	pt_entry_t *pte;
1296250634Sgber
1297250634Sgber	if ((va & L2_TYPE_MASK) != L2_TYPE_INV)
1298250634Sgber		panic("%s: freeing va that is not L2_TYPE INV!", __func__);
1299250634Sgber	pte = vtopte(va);
1300250634Sgber	*pte = *head;		/* virtual! L2_TYPE is L2_TYPE_INV though */
1301250634Sgber	*head = va;
1302250634Sgber}
1303250634Sgber
1304250634Sgberstatic void
1305250634Sgberpmap_ptelist_init(vm_offset_t *head, void *base, int npages)
1306250634Sgber{
1307250634Sgber	int i;
1308250634Sgber	vm_offset_t va;
1309250634Sgber
1310250634Sgber	*head = 0;
1311250634Sgber	for (i = npages - 1; i >= 0; i--) {
1312250634Sgber		va = (vm_offset_t)base + i * PAGE_SIZE;
1313250634Sgber		pmap_ptelist_free(head, va);
1314250634Sgber	}
1315250634Sgber}
1316250634Sgber
1317239268Sgonzo/*
1318239268Sgonzo *      Initialize the pmap module.
1319239268Sgonzo *      Called by vm_init, to initialize any structures that the pmap
1320239268Sgonzo *      system needs to map virtual memory.
1321239268Sgonzo */
1322239268Sgonzovoid
1323239268Sgonzopmap_init(void)
1324239268Sgonzo{
1325254918Sraj	vm_size_t s;
1326254918Sraj	int i, pv_npg;
1327239268Sgonzo
1328240803Salc	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
1329240803Salc	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1330240803Salc	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
1331240803Salc	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1332240803Salc
1333239268Sgonzo	/*
1334254918Sraj	 * Are large page mappings supported and enabled?
1335254918Sraj	 */
1336254918Sraj	TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
1337254918Sraj	if (sp_enabled) {
1338254918Sraj		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1339254918Sraj		    ("pmap_init: can't assign to pagesizes[1]"));
1340254918Sraj		pagesizes[1] = NBPDR;
1341254918Sraj	}
1342254918Sraj
1343254918Sraj	/*
1344254918Sraj	 * Calculate the size of the pv head table for superpages.
1345254918Sraj	 */
1346254918Sraj	for (i = 0; phys_avail[i + 1]; i += 2);
1347254918Sraj	pv_npg = round_1mpage(phys_avail[(i - 2) + 1]) / NBPDR;
1348254918Sraj
1349254918Sraj	/*
1350254918Sraj	 * Allocate memory for the pv head table for superpages.
1351254918Sraj	 */
1352254918Sraj	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1353254918Sraj	s = round_page(s);
1354254918Sraj	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
1355254918Sraj	    M_WAITOK | M_ZERO);
1356254918Sraj	for (i = 0; i < pv_npg; i++)
1357254918Sraj		TAILQ_INIT(&pv_table[i].pv_list);
1358254918Sraj
1359254918Sraj	/*
1360250634Sgber	 * Initialize the address space for the pv chunks.
1361239268Sgonzo	 */
1362250634Sgber
1363240803Salc	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1364240803Salc	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1365250634Sgber	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1366250634Sgber	pv_entry_max = roundup(pv_entry_max, _NPCPV);
1367240803Salc	pv_entry_high_water = 9 * (pv_entry_max / 10);
1368240803Salc
1369250634Sgber	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
1370254025Sjeff	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
1371250634Sgber
1372250634Sgber	if (pv_chunkbase == NULL)
1373250634Sgber		panic("pmap_init: not enough kvm for pv chunks");
1374250634Sgber
1375250634Sgber	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
1376250634Sgber
1377239268Sgonzo	/*
1378239268Sgonzo	 * Now it is safe to enable pv_table recording.
1379239268Sgonzo	 */
1380239268Sgonzo	PDEBUG(1, printf("pmap_init: done!\n"));
1381239268Sgonzo}
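
/*
 * Editorial sizing example (hypothetical numbers, assuming the usual
 * shpgperproc default of 200): with maxproc = 1000 and 131072 physical
 * pages, pv_entry_max starts at 200 * 1000 + 131072 = 331072, is then
 * rounded up to a multiple of _NPCPV, and pv_entry_high_water becomes
 * 90% of that.  pv_maxchunks reserves one page of KVA per _NPCPV pv
 * entries (with a floor of maxproc chunks) for the chunk pages
 * themselves.
 */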
1382239268Sgonzo
1383250634SgberSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
1384250634Sgber	"Max number of PV entries");
1385250634SgberSYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
1386250634Sgber	"Page share factor per proc");
1387250634Sgber
1388254918Srajstatic SYSCTL_NODE(_vm_pmap, OID_AUTO, section, CTLFLAG_RD, 0,
1389254918Sraj    "1MB page mapping counters");
1390254918Sraj
1391254918Srajstatic u_long pmap_section_demotions;
1392254918SrajSYSCTL_ULONG(_vm_pmap_section, OID_AUTO, demotions, CTLFLAG_RD,
1393254918Sraj    &pmap_section_demotions, 0, "1MB page demotions");
1394254918Sraj
1395254918Srajstatic u_long pmap_section_mappings;
1396254918SrajSYSCTL_ULONG(_vm_pmap_section, OID_AUTO, mappings, CTLFLAG_RD,
1397254918Sraj    &pmap_section_mappings, 0, "1MB page mappings");
1398254918Sraj
1399254918Srajstatic u_long pmap_section_p_failures;
1400254918SrajSYSCTL_ULONG(_vm_pmap_section, OID_AUTO, p_failures, CTLFLAG_RD,
1401254918Sraj    &pmap_section_p_failures, 0, "1MB page promotion failures");
1402254918Sraj
1403254918Srajstatic u_long pmap_section_promotions;
1404254918SrajSYSCTL_ULONG(_vm_pmap_section, OID_AUTO, promotions, CTLFLAG_RD,
1405254918Sraj    &pmap_section_promotions, 0, "1MB page promotions");
1406254918Sraj
1407239268Sgonzoint
1408250929Sgberpmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
1409239268Sgonzo{
1410239268Sgonzo	struct l2_dtable *l2;
1411239268Sgonzo	struct l2_bucket *l2b;
1412239268Sgonzo	pd_entry_t *pl1pd, l1pd;
1413239268Sgonzo	pt_entry_t *ptep, pte;
1414239268Sgonzo	vm_paddr_t pa;
1415239268Sgonzo	u_int l1idx;
1416239268Sgonzo	int rv = 0;
1417239268Sgonzo
1418239268Sgonzo	l1idx = L1_IDX(va);
1419240321Salc	rw_wlock(&pvh_global_lock);
1420250929Sgber	PMAP_LOCK(pmap);
1421239268Sgonzo	/*
1422254918Sraj	 * Check for, and possibly fix up, an L1 section mapping, but
1423254918Sraj	 * only when superpage mappings are enabled, to keep this path fast.
1424254918Sraj	 */
1425254918Sraj	if (sp_enabled) {
1426254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[l1idx];
1427254918Sraj		l1pd = *pl1pd;
1428254918Sraj		if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
1429254918Sraj			/* Catch an access to the vectors section */
1430254918Sraj			if (l1idx == L1_IDX(vector_page))
1431254918Sraj				goto out;
1432254918Sraj			/*
1433254918Sraj			 * Stay away from the kernel mappings;
1434254918Sraj			 * none of them should fault at the L1 level.
1435254918Sraj			 */
1436254918Sraj			if (pmap == pmap_kernel())
1437254918Sraj				goto out;
1438254918Sraj			/*
1439254918Sraj			 * Catch a forbidden userland access
1440254918Sraj			 */
1441254918Sraj			if (user && !(l1pd & L1_S_PROT_U))
1442254918Sraj				goto out;
1443254918Sraj			/*
1444254918Sraj			 * A superpage is always either mapped read-only,
1445254918Sraj			 * or it is already modified and permitted to be
1446254918Sraj			 * written. Therefore, only emulate the reference
1447254918Sraj			 * flag here and demote the section on a write fault.
1448254918Sraj			 */
1449254918Sraj			if ((ftype & VM_PROT_WRITE) && !L1_S_WRITABLE(l1pd) &&
1450254918Sraj			    L1_S_REFERENCED(l1pd)) {
1451254918Sraj				(void)pmap_demote_section(pmap, va);
1452254918Sraj				goto out;
1453254918Sraj			} else if (!L1_S_REFERENCED(l1pd)) {
1454254918Sraj				/* Mark the page "referenced" */
1455254918Sraj				*pl1pd = l1pd | L1_S_REF;
1456254918Sraj				PTE_SYNC(pl1pd);
1457254918Sraj				goto l1_section_out;
1458254918Sraj			} else
1459254918Sraj				goto out;
1460254918Sraj		}
1461254918Sraj	}
1462254918Sraj	/*
1463239268Sgonzo	 * If there is no l2_dtable for this address, then the process
1464239268Sgonzo	 * has no business accessing it.
1465239268Sgonzo	 *
1466239268Sgonzo	 * Note: This will catch userland processes trying to access
1467239268Sgonzo	 * kernel addresses.
1468239268Sgonzo	 */
1469250929Sgber	l2 = pmap->pm_l2[L2_IDX(l1idx)];
1470239268Sgonzo	if (l2 == NULL)
1471239268Sgonzo		goto out;
1472239268Sgonzo
1473239268Sgonzo	/*
1474239268Sgonzo	 * Likewise if there is no L2 descriptor table
1475239268Sgonzo	 */
1476239268Sgonzo	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1477239268Sgonzo	if (l2b->l2b_kva == NULL)
1478239268Sgonzo		goto out;
1479239268Sgonzo
1480239268Sgonzo	/*
1481239268Sgonzo	 * Check the PTE itself.
1482239268Sgonzo	 */
1483239268Sgonzo	ptep = &l2b->l2b_kva[l2pte_index(va)];
1484239268Sgonzo	pte = *ptep;
1485239268Sgonzo	if (pte == 0)
1486239268Sgonzo		goto out;
1487239268Sgonzo
1488239268Sgonzo	/*
1489239268Sgonzo	 * Catch a userland access to the vector page mapped at 0x0
1490239268Sgonzo	 */
1491250297Sgber	if (user && !(pte & L2_S_PROT_U))
1492239268Sgonzo		goto out;
1493239268Sgonzo	if (va == vector_page)
1494239268Sgonzo		goto out;
1495239268Sgonzo
1496239268Sgonzo	pa = l2pte_pa(pte);
1497239268Sgonzo	CTR5(KTR_PMAP, "pmap_fault_fix: pmap:%p va:%x pte:0x%x ftype:%x user:%x",
1498250929Sgber	    pmap, va, pte, ftype, user);
1499250928Sgber	if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte)) &&
1500250928Sgber	    L2_S_REFERENCED(pte)) {
1501239268Sgonzo		/*
1502239268Sgonzo		 * This looks like a good candidate for "page modified"
1503239268Sgonzo		 * emulation...
1504239268Sgonzo		 */
1505239268Sgonzo		struct pv_entry *pv;
1506250929Sgber		struct vm_page *m;
1507239268Sgonzo
1508239268Sgonzo		/* Extract the physical address of the page */
1509250929Sgber		if ((m = PHYS_TO_VM_PAGE(pa)) == NULL) {
1510239268Sgonzo			goto out;
1511239268Sgonzo		}
1512239268Sgonzo		/* Get the current flags for this page. */
1513239268Sgonzo
1514254918Sraj		pv = pmap_find_pv(&m->md, pmap, va);
1515239268Sgonzo		if (pv == NULL) {
1516239268Sgonzo			goto out;
1517239268Sgonzo		}
1518239268Sgonzo
1519239268Sgonzo		/*
1520239268Sgonzo		 * Do the flags say this page is writable? If not, then it
1521239268Sgonzo		 * is a genuine write fault. If so, then the write fault is
1522239268Sgonzo		 * our fault, as we did not reflect the write access in the
1523239268Sgonzo		 * PTE. Now that we know a write has occurred, we can correct
1524239268Sgonzo		 * this and also set the modified bit.
1525239268Sgonzo		 */
1526239268Sgonzo		if ((pv->pv_flags & PVF_WRITE) == 0) {
1527239268Sgonzo			goto out;
1528239268Sgonzo		}
1529250928Sgber
1530250929Sgber		vm_page_dirty(m);
1531239268Sgonzo
1532239268Sgonzo		/* Re-enable write permissions for the page */
1533266050Sian		*ptep = (pte & ~L2_APX);
1534239268Sgonzo		PTE_SYNC(ptep);
1535239268Sgonzo		rv = 1;
1536266050Sian		CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", *ptep);
1537250928Sgber	} else if (!L2_S_REFERENCED(pte)) {
1538239268Sgonzo		/*
1539239268Sgonzo		 * This looks like a good candidate for "page referenced"
1540239268Sgonzo		 * emulation.
1541239268Sgonzo		 */
1542239268Sgonzo		struct pv_entry *pv;
1543250929Sgber		struct vm_page *m;
1544239268Sgonzo
1545239268Sgonzo		/* Extract the physical address of the page */
1546250929Sgber		if ((m = PHYS_TO_VM_PAGE(pa)) == NULL)
1547239268Sgonzo			goto out;
1548239268Sgonzo		/* Get the current flags for this page. */
1549254918Sraj		pv = pmap_find_pv(&m->md, pmap, va);
1550239268Sgonzo		if (pv == NULL)
1551239268Sgonzo			goto out;
1552239268Sgonzo
1553250929Sgber		vm_page_aflag_set(m, PGA_REFERENCED);
1554239268Sgonzo
1555250928Sgber		/* Mark the page "referenced" */
1556250928Sgber		*ptep = pte | L2_S_REF;
1557239268Sgonzo		PTE_SYNC(ptep);
1558239268Sgonzo		rv = 1;
1559266050Sian		CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", *ptep);
1560239268Sgonzo	}
1561239268Sgonzo
1562239268Sgonzo	/*
1563239268Sgonzo	 * We know there is a valid mapping here, so simply
1564239268Sgonzo	 * fix up the L1 if necessary.
1565239268Sgonzo	 */
1566250929Sgber	pl1pd = &pmap->pm_l1->l1_kva[l1idx];
1567250929Sgber	l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
1568239268Sgonzo	if (*pl1pd != l1pd) {
1569239268Sgonzo		*pl1pd = l1pd;
1570239268Sgonzo		PTE_SYNC(pl1pd);
1571239268Sgonzo		rv = 1;
1572239268Sgonzo	}
1573239268Sgonzo
1574239268Sgonzo#ifdef DEBUG
1575239268Sgonzo	/*
1576239268Sgonzo	 * If 'rv == 0' at this point, it generally indicates that there is a
1577239268Sgonzo	 * stale TLB entry for the faulting address. This happens when two or
1578239268Sgonzo	 * more processes are sharing an L1. Since we don't flush the TLB on
1579239268Sgonzo	 * a context switch between such processes, we can take domain faults
1580239268Sgonzo	 * for mappings which exist at the same VA in both processes, EVEN IF
1581239268Sgonzo	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
1582239268Sgonzo	 * example.
1583239268Sgonzo	 *
1584239268Sgonzo	 * This is extremely likely to happen if pmap_enter() updated the L1
1585239268Sgonzo	 * entry for a recently entered mapping. In this case, the TLB is
1586239268Sgonzo	 * flushed for the new mapping, but there may still be TLB entries for
1587239268Sgonzo	 * other mappings belonging to other processes in the 1MB range
1588239268Sgonzo	 * covered by the L1 entry.
1589239268Sgonzo	 *
1590239268Sgonzo	 * Since 'rv == 0', we know that the L1 already contains the correct
1591239268Sgonzo	 * value, so the fault must be due to a stale TLB entry.
1592239268Sgonzo	 *
1593239268Sgonzo	 * Since we always need to flush the TLB anyway in the case where we
1594239268Sgonzo	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
1595239268Sgonzo	 * stale TLB entries dynamically.
1596239268Sgonzo	 *
1597239268Sgonzo	 * However, the above condition can ONLY happen if the current L1 is
1598239268Sgonzo	 * being shared. If it happens when the L1 is unshared, it indicates
1599239268Sgonzo	 * that other parts of the pmap are not doing their job WRT managing
1600239268Sgonzo	 * the TLB.
1601239268Sgonzo	 */
1602250929Sgber	if (rv == 0 && pmap->pm_l1->l1_domain_use_count == 1) {
1603250929Sgber		printf("fixup: pmap %p, va 0x%08x, ftype %d - nothing to do!\n",
1604250929Sgber		    pmap, va, ftype);
1605239268Sgonzo		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
1606239268Sgonzo		    l2, l2b, ptep, pl1pd);
1607239268Sgonzo		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
1608239268Sgonzo		    pte, l1pd, last_fault_code);
1609239268Sgonzo#ifdef DDB
1610239268Sgonzo		Debugger();
1611239268Sgonzo#endif
1612239268Sgonzo	}
1613239268Sgonzo#endif
1614239268Sgonzo
1615254918Srajl1_section_out:
1616239268Sgonzo	cpu_tlb_flushID_SE(va);
1617239268Sgonzo	cpu_cpwait();
1618239268Sgonzo
1619239268Sgonzo	rv = 1;
1620239268Sgonzo
1621239268Sgonzoout:
1622240321Salc	rw_wunlock(&pvh_global_lock);
1623250929Sgber	PMAP_UNLOCK(pmap);
1624239268Sgonzo	return (rv);
1625239268Sgonzo}
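
/*
 * Editorial summary of the fixup above (a sketch of existing behaviour,
 * not an addition): referenced and modified state are emulated through
 * permission faults.  A freshly entered small page has L2_S_REF clear,
 * so its first access faults and the handler sets the bit; a clean page
 * is kept read-only via L2_APX, so the first store faults and the
 * handler clears L2_APX and calls vm_page_dirty().  Roughly:
 *
 *	first access:	*ptep = pte | L2_S_REF;		referenced
 *	first store:	*ptep = pte & ~L2_APX;		modified
 *
 * The L1 section path at the top does the analogous thing with L1_S_REF
 * and falls back to pmap_demote_section() on a write fault.
 */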
1626239268Sgonzo
1627239268Sgonzovoid
1628239268Sgonzopmap_postinit(void)
1629239268Sgonzo{
1630239268Sgonzo	struct l2_bucket *l2b;
1631239268Sgonzo	struct l1_ttable *l1;
1632239268Sgonzo	pd_entry_t *pl1pt;
1633239268Sgonzo	pt_entry_t *ptep, pte;
1634239268Sgonzo	vm_offset_t va, eva;
1635239268Sgonzo	u_int loop, needed;
1636239268Sgonzo
1637239268Sgonzo	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
1638239268Sgonzo	needed -= 1;
1639239268Sgonzo	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
1640239268Sgonzo
1641239268Sgonzo	for (loop = 0; loop < needed; loop++, l1++) {
1642239268Sgonzo		/* Allocate a L1 page table */
1643239268Sgonzo		va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
1644239268Sgonzo		    0xffffffff, L1_TABLE_SIZE, 0);
1645239268Sgonzo
1646239268Sgonzo		if (va == 0)
1647239268Sgonzo			panic("Cannot allocate L1 KVM");
1648239268Sgonzo
1649239268Sgonzo		eva = va + L1_TABLE_SIZE;
1650239268Sgonzo		pl1pt = (pd_entry_t *)va;
1651239268Sgonzo
1652239268Sgonzo		while (va < eva) {
1653239268Sgonzo			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1654239268Sgonzo			ptep = &l2b->l2b_kva[l2pte_index(va)];
1655239268Sgonzo			pte = *ptep;
1656239268Sgonzo			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
1657239268Sgonzo			*ptep = pte;
1658239268Sgonzo			PTE_SYNC(ptep);
1659266353Sian			cpu_tlb_flushID_SE(va);
1660266353Sian			cpu_cpwait();
1661239268Sgonzo			va += PAGE_SIZE;
1662239268Sgonzo		}
1663239268Sgonzo		pmap_init_l1(l1, pl1pt);
1664239268Sgonzo	}
1665239268Sgonzo#ifdef DEBUG
1666239268Sgonzo	printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
1667239268Sgonzo	    needed);
1668239268Sgonzo#endif
1669239268Sgonzo}
1670239268Sgonzo
1671239268Sgonzo/*
1672239268Sgonzo * This is used to stuff certain critical values into the PCB where they
1673239268Sgonzo * can be accessed quickly from cpu_switch() et al.
1674239268Sgonzo */
1675239268Sgonzovoid
1676250929Sgberpmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
1677239268Sgonzo{
1678239268Sgonzo	struct l2_bucket *l2b;
1679239268Sgonzo
1680250929Sgber	pcb->pcb_pagedir = pmap->pm_l1->l1_physaddr;
1681239268Sgonzo	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
1682250929Sgber	    (DOMAIN_CLIENT << (pmap->pm_domain * 2));
1683239268Sgonzo
1684239268Sgonzo	if (vector_page < KERNBASE) {
1685250929Sgber		pcb->pcb_pl1vec = &pmap->pm_l1->l1_kva[L1_IDX(vector_page)];
1686250929Sgber		l2b = pmap_get_l2_bucket(pmap, vector_page);
1687239268Sgonzo		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
1688250929Sgber		    L1_C_DOM(pmap->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
1689239268Sgonzo	} else
1690239268Sgonzo		pcb->pcb_pl1vec = NULL;
1691239268Sgonzo}
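
/*
 * Editorial worked example (hypothetical values, assuming DOMAIN_CLIENT
 * encodes as 0x1): with PMAP_DOMAIN_KERNEL == 0 and pm_domain == 5, the
 * DACR computed above is
 *
 *	(0x1 << (0 * 2)) | (0x1 << (5 * 2)) == 0x00000401
 *
 * i.e. domains 0 and 5 become "client" (permissions are checked) and
 * every other domain is "no access".
 */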
1692239268Sgonzo
1693239268Sgonzovoid
1694239268Sgonzopmap_activate(struct thread *td)
1695239268Sgonzo{
1696250929Sgber	pmap_t pmap;
1697239268Sgonzo	struct pcb *pcb;
1698239268Sgonzo
1699250929Sgber	pmap = vmspace_pmap(td->td_proc->p_vmspace);
1700239268Sgonzo	pcb = td->td_pcb;
1701239268Sgonzo
1702239268Sgonzo	critical_enter();
1703250929Sgber	pmap_set_pcb_pagedir(pmap, pcb);
1704239268Sgonzo
1705239268Sgonzo	if (td == curthread) {
1706239268Sgonzo		u_int cur_dacr, cur_ttb;
1707239268Sgonzo
1708239268Sgonzo		__asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
1709239268Sgonzo		__asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
1710239268Sgonzo
1711239268Sgonzo		cur_ttb &= ~(L1_TABLE_SIZE - 1);
1712239268Sgonzo
1713239268Sgonzo		if (cur_ttb == (u_int)pcb->pcb_pagedir &&
1714239268Sgonzo		    cur_dacr == pcb->pcb_dacr) {
1715239268Sgonzo			/*
1716239268Sgonzo			 * No need to switch address spaces.
1717239268Sgonzo			 */
1718239268Sgonzo			critical_exit();
1719239268Sgonzo			return;
1720239268Sgonzo		}
1721239268Sgonzo
1722239268Sgonzo
1723239268Sgonzo		/*
1724239268Sgonzo		 * We MUST, I repeat, MUST fix up the L1 entry corresponding
1725239268Sgonzo		 * to 'vector_page' in the incoming L1 table before switching
1726239268Sgonzo		 * to it otherwise subsequent interrupts/exceptions (including
1727239268Sgonzo		 * domain faults!) will jump into hyperspace.
1728239268Sgonzo		 */
1729239268Sgonzo		if (pcb->pcb_pl1vec) {
1730239268Sgonzo			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
1731239268Sgonzo		}
1732239268Sgonzo
1733239268Sgonzo		cpu_domains(pcb->pcb_dacr);
1734239268Sgonzo		cpu_setttb(pcb->pcb_pagedir);
1735239268Sgonzo	}
1736239268Sgonzo	critical_exit();
1737239268Sgonzo}
1738239268Sgonzo
1739239268Sgonzostatic int
1740239268Sgonzopmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
1741239268Sgonzo{
1742239268Sgonzo	pd_entry_t *pdep, pde;
1743239268Sgonzo	pt_entry_t *ptep, pte;
1744239268Sgonzo	vm_offset_t pa;
1745239268Sgonzo	int rv = 0;
1746239268Sgonzo
1747239268Sgonzo	/*
1748239268Sgonzo	 * Make sure the descriptor itself has the correct cache mode
1749239268Sgonzo	 */
1750239268Sgonzo	pdep = &kl1[L1_IDX(va)];
1751239268Sgonzo	pde = *pdep;
1752239268Sgonzo
1753239268Sgonzo	if (l1pte_section_p(pde)) {
1754239268Sgonzo		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
1755239268Sgonzo			*pdep = (pde & ~L1_S_CACHE_MASK) |
1756239268Sgonzo			    pte_l1_s_cache_mode_pt;
1757239268Sgonzo			PTE_SYNC(pdep);
1758239268Sgonzo			rv = 1;
1759239268Sgonzo		}
1760239268Sgonzo	} else {
1761239268Sgonzo		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
1762239268Sgonzo		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
1763239268Sgonzo		if (ptep == NULL)
1764239268Sgonzo			panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
1765239268Sgonzo
1766239268Sgonzo		ptep = &ptep[l2pte_index(va)];
1767239268Sgonzo		pte = *ptep;
1768239268Sgonzo		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1769239268Sgonzo			*ptep = (pte & ~L2_S_CACHE_MASK) |
1770239268Sgonzo			    pte_l2_s_cache_mode_pt;
1771239268Sgonzo			PTE_SYNC(ptep);
1772239268Sgonzo			rv = 1;
1773239268Sgonzo		}
1774239268Sgonzo	}
1775239268Sgonzo
1776239268Sgonzo	return (rv);
1777239268Sgonzo}
1778239268Sgonzo
1779239268Sgonzostatic void
1780239268Sgonzopmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
1781239268Sgonzo    pt_entry_t **ptep)
1782239268Sgonzo{
1783239268Sgonzo	vm_offset_t va = *availp;
1784239268Sgonzo	struct l2_bucket *l2b;
1785239268Sgonzo
1786239268Sgonzo	if (ptep) {
1787239268Sgonzo		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1788239268Sgonzo		if (l2b == NULL)
1789239268Sgonzo			panic("pmap_alloc_specials: no l2b for 0x%x", va);
1790239268Sgonzo
1791239268Sgonzo		*ptep = &l2b->l2b_kva[l2pte_index(va)];
1792239268Sgonzo	}
1793239268Sgonzo
1794239268Sgonzo	*vap = va;
1795239268Sgonzo	*availp = va + (PAGE_SIZE * pages);
1796239268Sgonzo}
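
/*
 * Editorial usage note: pmap_bootstrap() below carves fixed KVA out of
 * virtual_avail with this helper, e.g.
 *
 *	pmap_alloc_specials(&virtual_avail, 1, &czp->srcva, &czp->srcptep);
 *
 * reserves one page of KVA and hands back a pointer to its kernel L2
 * PTE slot, while passing a NULL pte pointer (as the crashdumpmap setup
 * does) just reserves the address range.
 */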
1797239268Sgonzo
1798239268Sgonzo/*
1799239268Sgonzo *	Bootstrap the system enough to run with virtual memory.
1800239268Sgonzo *
1801239268Sgonzo *	On the arm this is called after mapping has already been enabled
1802239268Sgonzo *	and just syncs the pmap module with what has already been done.
1803239268Sgonzo *	[We can't call it easily with mapping off since the kernel is not
1804239268Sgonzo *	mapped with PA == VA, hence we would have to relocate every address
1805239268Sgonzo *	from the linked base (virtual) address "KERNBASE" to the actual
1806239268Sgonzo *	(physical) address starting relative to 0]
1807239268Sgonzo */
1808239268Sgonzo#define PMAP_STATIC_L2_SIZE 16
1809239268Sgonzo
1810239268Sgonzovoid
1811247046Salcpmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
1812239268Sgonzo{
1813239268Sgonzo	static struct l1_ttable static_l1;
1814239268Sgonzo	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
1815239268Sgonzo	struct l1_ttable *l1 = &static_l1;
1816239268Sgonzo	struct l2_dtable *l2;
1817239268Sgonzo	struct l2_bucket *l2b;
1818266353Sian	struct czpages *czp;
1819239268Sgonzo	pd_entry_t pde;
1820239268Sgonzo	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
1821239268Sgonzo	pt_entry_t *ptep;
1822239268Sgonzo	vm_paddr_t pa;
1823239268Sgonzo	vm_offset_t va;
1824239268Sgonzo	vm_size_t size;
1825266353Sian	int i, l1idx, l2idx, l2next = 0;
1826239268Sgonzo
1827239268Sgonzo	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
1828247046Salc	    firstaddr, vm_max_kernel_address));
1829239268Sgonzo
1830239268Sgonzo	virtual_avail = firstaddr;
1831239268Sgonzo	kernel_pmap->pm_l1 = l1;
1832239268Sgonzo	kernel_l1pa = l1pt->pv_pa;
1833239268Sgonzo
1834239268Sgonzo	/*
1835239268Sgonzo	 * Scan the L1 translation table created by initarm() and create
1836239268Sgonzo	 * the required metadata for all valid mappings found in it.
1837239268Sgonzo	 */
1838239268Sgonzo	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
1839239268Sgonzo		pde = kernel_l1pt[l1idx];
1840239268Sgonzo
1841239268Sgonzo		/*
1842239268Sgonzo		 * We're only interested in Coarse mappings.
1843239268Sgonzo		 * pmap_extract() can deal with section mappings without
1844239268Sgonzo		 * recourse to checking L2 metadata.
1845239268Sgonzo		 */
1846239268Sgonzo		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
1847239268Sgonzo			continue;
1848239268Sgonzo
1849239268Sgonzo		/*
1850239268Sgonzo		 * Lookup the KVA of this L2 descriptor table
1851239268Sgonzo		 */
1852239268Sgonzo		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
1853239268Sgonzo		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
1854239268Sgonzo
1855239268Sgonzo		if (ptep == NULL) {
1856239268Sgonzo			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
1857239268Sgonzo			    (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
1858239268Sgonzo		}
1859239268Sgonzo
1860239268Sgonzo		/*
1861239268Sgonzo		 * Fetch the associated L2 metadata structure.
1862239268Sgonzo		 * Allocate a new one if necessary.
1863239268Sgonzo		 */
1864239268Sgonzo		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
1865239268Sgonzo			if (l2next == PMAP_STATIC_L2_SIZE)
1866239268Sgonzo				panic("pmap_bootstrap: out of static L2s");
1867239268Sgonzo			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
1868239268Sgonzo			    &static_l2[l2next++];
1869239268Sgonzo		}
1870239268Sgonzo
1871239268Sgonzo		/*
1872239268Sgonzo		 * One more L1 slot tracked...
1873239268Sgonzo		 */
1874239268Sgonzo		l2->l2_occupancy++;
1875239268Sgonzo
1876239268Sgonzo		/*
1877239268Sgonzo		 * Fill in the details of the L2 descriptor in the
1878239268Sgonzo		 * appropriate bucket.
1879239268Sgonzo		 */
1880239268Sgonzo		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1881239268Sgonzo		l2b->l2b_kva = ptep;
1882239268Sgonzo		l2b->l2b_phys = pa;
1883239268Sgonzo		l2b->l2b_l1idx = l1idx;
1884239268Sgonzo
1885239268Sgonzo		/*
1886239268Sgonzo		 * Establish an initial occupancy count for this descriptor
1887239268Sgonzo		 */
1888239268Sgonzo		for (l2idx = 0;
1889239268Sgonzo		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1890239268Sgonzo		    l2idx++) {
1891239268Sgonzo			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
1892239268Sgonzo				l2b->l2b_occupancy++;
1893239268Sgonzo			}
1894239268Sgonzo		}
1895239268Sgonzo
1896239268Sgonzo		/*
1897239268Sgonzo		 * Make sure the descriptor itself has the correct cache mode.
1898239268Sgonzo		 * If not, fix it, but whine about the problem. Port-meisters
1899239268Sgonzo		 * should consider this a clue to fix up their initarm()
1900239268Sgonzo		 * function. :)
1901239268Sgonzo		 */
1902239268Sgonzo		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
1903239268Sgonzo			printf("pmap_bootstrap: WARNING! wrong cache mode for "
1904239268Sgonzo			    "L2 pte @ %p\n", ptep);
1905239268Sgonzo		}
1906239268Sgonzo	}
1907239268Sgonzo
1908239268Sgonzo
1909239268Sgonzo	/*
1910239268Sgonzo	 * Ensure the primary (kernel) L1 has the correct cache mode for
1911239268Sgonzo	 * a page table. Bitch if it is not correctly set.
1912239268Sgonzo	 */
1913239268Sgonzo	for (va = (vm_offset_t)kernel_l1pt;
1914239268Sgonzo	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
1915239268Sgonzo		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
1916239268Sgonzo			printf("pmap_bootstrap: WARNING! wrong cache mode for "
1917239268Sgonzo			    "primary L1 @ 0x%x\n", va);
1918239268Sgonzo	}
1919239268Sgonzo
1920239268Sgonzo	cpu_dcache_wbinv_all();
1921239268Sgonzo	cpu_l2cache_wbinv_all();
1922239268Sgonzo	cpu_tlb_flushID();
1923239268Sgonzo	cpu_cpwait();
1924239268Sgonzo
1925239268Sgonzo	PMAP_LOCK_INIT(kernel_pmap);
1926239268Sgonzo	CPU_FILL(&kernel_pmap->pm_active);
1927239268Sgonzo	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
1928250634Sgber	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1929239268Sgonzo
1930239268Sgonzo	/*
1931240321Salc	 * Initialize the global pv list lock.
1932240321Salc	 */
1933240983Salc	rw_init(&pvh_global_lock, "pmap pv global");
1934240321Salc
1935240321Salc	/*
1936239268Sgonzo	 * Reserve some special page table entries/VA space for temporary
1937266353Sian	 * mapping of pages that are being copied or zeroed.
1938239268Sgonzo	 */
1939266353Sian	for (czp = cpu_czpages, i = 0; i < MAXCPU; ++i, ++czp) {
1940266353Sian		mtx_init(&czp->lock, "czpages", NULL, MTX_DEF);
1941266353Sian		pmap_alloc_specials(&virtual_avail, 1, &czp->srcva, &czp->srcptep);
1942266353Sian		pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)czp->srcptep);
1943266353Sian		pmap_alloc_specials(&virtual_avail, 1, &czp->dstva, &czp->dstptep);
1944266353Sian		pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)czp->dstptep);
1945266353Sian	}
1946239268Sgonzo
1947247046Salc	size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) /
1948247046Salc	    L1_S_SIZE;
1949239268Sgonzo	pmap_alloc_specials(&virtual_avail,
1950239268Sgonzo	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
1951239268Sgonzo	    &pmap_kernel_l2ptp_kva, NULL);
1952239268Sgonzo
1953239268Sgonzo	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
1954239268Sgonzo	pmap_alloc_specials(&virtual_avail,
1955239268Sgonzo	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
1956239268Sgonzo	    &pmap_kernel_l2dtable_kva, NULL);
1957239268Sgonzo
1958239268Sgonzo	pmap_alloc_specials(&virtual_avail,
1959239268Sgonzo	    1, (vm_offset_t*)&_tmppt, NULL);
1960239268Sgonzo	pmap_alloc_specials(&virtual_avail,
1961239268Sgonzo	    MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL);
1962239268Sgonzo	SLIST_INIT(&l1_list);
1963239268Sgonzo	TAILQ_INIT(&l1_lru_list);
1964239268Sgonzo	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
1965239268Sgonzo	pmap_init_l1(l1, kernel_l1pt);
1966239268Sgonzo	cpu_dcache_wbinv_all();
1967239268Sgonzo	cpu_l2cache_wbinv_all();
1968266353Sian	cpu_tlb_flushID();
1969266353Sian	cpu_cpwait();
1970239268Sgonzo
1971239268Sgonzo	virtual_avail = round_page(virtual_avail);
1972247046Salc	virtual_end = vm_max_kernel_address;
1973239268Sgonzo	kernel_vm_end = pmap_curmaxkvaddr;
1974239268Sgonzo
1975239268Sgonzo	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
1976239268Sgonzo}
1977239268Sgonzo
1978239268Sgonzo/***************************************************
1979239268Sgonzo * Pmap allocation/deallocation routines.
1980239268Sgonzo ***************************************************/
1981239268Sgonzo
1982239268Sgonzo/*
1983239268Sgonzo * Release any resources held by the given physical map.
1984239268Sgonzo * Called when a pmap initialized by pmap_pinit is being released.
1985239268Sgonzo * Should only be called if the map contains no valid mappings.
1986239268Sgonzo */
1987239268Sgonzovoid
1988239268Sgonzopmap_release(pmap_t pmap)
1989239268Sgonzo{
1990239268Sgonzo	struct pcb *pcb;
1991239268Sgonzo
1992239268Sgonzo	cpu_tlb_flushID();
1993239268Sgonzo	cpu_cpwait();
1994239268Sgonzo	if (vector_page < KERNBASE) {
1995239268Sgonzo		struct pcb *curpcb = PCPU_GET(curpcb);
1996239268Sgonzo		pcb = thread0.td_pcb;
1997239268Sgonzo		if (pmap_is_current(pmap)) {
1998239268Sgonzo			/*
1999239268Sgonzo			 * Frob the L1 entry corresponding to the vector
2000239268Sgonzo			 * page so that it contains the kernel pmap's domain
2001239268Sgonzo			 * number. This will ensure pmap_remove() does not
2002239268Sgonzo			 * pull the current vector page out from under us.
2003239268Sgonzo			 */
2004239268Sgonzo			critical_enter();
2005239268Sgonzo			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
2006239268Sgonzo			cpu_domains(pcb->pcb_dacr);
2007239268Sgonzo			cpu_setttb(pcb->pcb_pagedir);
2008239268Sgonzo			critical_exit();
2009239268Sgonzo		}
2010239268Sgonzo		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
2011239268Sgonzo		/*
2012239268Sgonzo		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
2013239268Sgonzo		 * since this process has no remaining mappings of its own.
2014239268Sgonzo		 */
2015239268Sgonzo		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
2016239268Sgonzo		curpcb->pcb_l1vec = pcb->pcb_l1vec;
2017239268Sgonzo		curpcb->pcb_dacr = pcb->pcb_dacr;
2018239268Sgonzo		curpcb->pcb_pagedir = pcb->pcb_pagedir;
2019239268Sgonzo
2020239268Sgonzo	}
2021239268Sgonzo	pmap_free_l1(pmap);
2022239268Sgonzo
2023239268Sgonzo	dprintf("pmap_release()\n");
2024239268Sgonzo}
2025239268Sgonzo
2026239268Sgonzo
2027239268Sgonzo
2028239268Sgonzo/*
2029239268Sgonzo * Helper function for pmap_grow_l2_bucket()
2030239268Sgonzo */
2031239268Sgonzostatic __inline int
2032239268Sgonzopmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
2033239268Sgonzo{
2034239268Sgonzo	struct l2_bucket *l2b;
2035239268Sgonzo	pt_entry_t *ptep;
2036239268Sgonzo	vm_paddr_t pa;
2037250929Sgber	struct vm_page *m;
2038239268Sgonzo
2039250929Sgber	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
2040250929Sgber	if (m == NULL)
2041239268Sgonzo		return (1);
2042250929Sgber	pa = VM_PAGE_TO_PHYS(m);
2043239268Sgonzo
2044239268Sgonzo	if (pap)
2045239268Sgonzo		*pap = pa;
2046239268Sgonzo
2047239268Sgonzo	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2048239268Sgonzo
2049239268Sgonzo	ptep = &l2b->l2b_kva[l2pte_index(va)];
2050250928Sgber	*ptep = L2_S_PROTO | pa | cache_mode | L2_S_REF;
2051239268Sgonzo	pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE, 0);
2052239268Sgonzo	PTE_SYNC(ptep);
2053266353Sian	cpu_tlb_flushD_SE(va);
2054266353Sian	cpu_cpwait();
2055239268Sgonzo
2056239268Sgonzo	return (0);
2057239268Sgonzo}
2058239268Sgonzo
2059239268Sgonzo/*
2060239268Sgonzo * This is the same as pmap_alloc_l2_bucket(), except that it is only
2061239268Sgonzo * used by pmap_growkernel().
2062239268Sgonzo */
2063239268Sgonzostatic __inline struct l2_bucket *
2064250929Sgberpmap_grow_l2_bucket(pmap_t pmap, vm_offset_t va)
2065239268Sgonzo{
2066239268Sgonzo	struct l2_dtable *l2;
2067239268Sgonzo	struct l2_bucket *l2b;
2068239268Sgonzo	struct l1_ttable *l1;
2069239268Sgonzo	pd_entry_t *pl1pd;
2070239268Sgonzo	u_short l1idx;
2071239268Sgonzo	vm_offset_t nva;
2072239268Sgonzo
2073239268Sgonzo	l1idx = L1_IDX(va);
2074239268Sgonzo
2075250929Sgber	if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
2076239268Sgonzo		/*
2077239268Sgonzo		 * No mapping at this address, as there is
2078239268Sgonzo		 * no entry in the L1 table.
2079239268Sgonzo		 * Need to allocate a new l2_dtable.
2080239268Sgonzo		 */
2081239268Sgonzo		nva = pmap_kernel_l2dtable_kva;
2082239268Sgonzo		if ((nva & PAGE_MASK) == 0) {
2083239268Sgonzo			/*
2084239268Sgonzo			 * Need to allocate a backing page
2085239268Sgonzo			 */
2086239268Sgonzo			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2087239268Sgonzo				return (NULL);
2088239268Sgonzo		}
2089239268Sgonzo
2090239268Sgonzo		l2 = (struct l2_dtable *)nva;
2091239268Sgonzo		nva += sizeof(struct l2_dtable);
2092239268Sgonzo
2093239268Sgonzo		if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
2094239268Sgonzo		    PAGE_MASK)) {
2095239268Sgonzo			/*
2096239268Sgonzo			 * The new l2_dtable straddles a page boundary.
2097239268Sgonzo			 * Map in another page to cover it.
2098239268Sgonzo			 */
2099239268Sgonzo			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2100239268Sgonzo				return (NULL);
2101239268Sgonzo		}
2102239268Sgonzo
2103239268Sgonzo		pmap_kernel_l2dtable_kva = nva;
2104239268Sgonzo
2105239268Sgonzo		/*
2106239268Sgonzo		 * Link it into the parent pmap
2107239268Sgonzo		 */
2108250929Sgber		pmap->pm_l2[L2_IDX(l1idx)] = l2;
2109239268Sgonzo		memset(l2, 0, sizeof(*l2));
2110239268Sgonzo	}
2111239268Sgonzo
2112239268Sgonzo	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
2113239268Sgonzo
2114239268Sgonzo	/*
2115239268Sgonzo	 * Fetch pointer to the L2 page table associated with the address.
2116239268Sgonzo	 */
2117239268Sgonzo	if (l2b->l2b_kva == NULL) {
2118239268Sgonzo		pt_entry_t *ptep;
2119239268Sgonzo
2120239268Sgonzo		/*
2121239268Sgonzo		 * No L2 page table has been allocated. Chances are, this
2122239268Sgonzo		 * is because we just allocated the l2_dtable, above.
2123239268Sgonzo		 */
2124239268Sgonzo		nva = pmap_kernel_l2ptp_kva;
2125239268Sgonzo		ptep = (pt_entry_t *)nva;
2126239268Sgonzo		if ((nva & PAGE_MASK) == 0) {
2127239268Sgonzo			/*
2128239268Sgonzo			 * Need to allocate a backing page
2129239268Sgonzo			 */
2130239268Sgonzo			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
2131239268Sgonzo			    &pmap_kernel_l2ptp_phys))
2132239268Sgonzo				return (NULL);
2133239268Sgonzo		}
2134239268Sgonzo		memset(ptep, 0, L2_TABLE_SIZE_REAL);
2135239268Sgonzo		l2->l2_occupancy++;
2136239268Sgonzo		l2b->l2b_kva = ptep;
2137239268Sgonzo		l2b->l2b_l1idx = l1idx;
2138239268Sgonzo		l2b->l2b_phys = pmap_kernel_l2ptp_phys;
2139239268Sgonzo
2140239268Sgonzo		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
2141239268Sgonzo		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
2142239268Sgonzo	}
2143239268Sgonzo
2144239268Sgonzo	/* Distribute new L1 entry to all other L1s */
2145239268Sgonzo	SLIST_FOREACH(l1, &l1_list, l1_link) {
2146239268Sgonzo		pl1pd = &l1->l1_kva[L1_IDX(va)];
2147239268Sgonzo		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
2148239268Sgonzo		    L1_C_PROTO;
2149239268Sgonzo		PTE_SYNC(pl1pd);
2150239268Sgonzo	}
2151266357Sian	cpu_tlb_flushID_SE(va);
2152266357Sian	cpu_cpwait();
2153239268Sgonzo
2154239268Sgonzo	return (l2b);
2155239268Sgonzo}
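
/*
 * Editorial note: a hardware L2 (coarse) table is only
 * L2_TABLE_SIZE_REAL bytes (1KB), so several of them are packed into
 * each 4KB backing page; the (nva & PAGE_MASK) tests above decide when
 * a fresh backing page must be grabbed.  The same packing is applied to
 * the software l2_dtable structures.
 */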
2156239268Sgonzo
2157239268Sgonzo
2158239268Sgonzo/*
2159239268Sgonzo * grow the number of kernel page table entries, if needed
2160239268Sgonzo */
2161239268Sgonzovoid
2162239268Sgonzopmap_growkernel(vm_offset_t addr)
2163239268Sgonzo{
2164250929Sgber	pmap_t kpmap = pmap_kernel();
2165239268Sgonzo
2166239268Sgonzo	if (addr <= pmap_curmaxkvaddr)
2167239268Sgonzo		return;		/* we are OK */
2168239268Sgonzo
2169239268Sgonzo	/*
2170239268Sgonzo	 * whoops!   we need to add kernel PTPs
2171239268Sgonzo	 */
2172239268Sgonzo
2173239268Sgonzo	/* Map 1MB at a time */
2174239268Sgonzo	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
2175250929Sgber		pmap_grow_l2_bucket(kpmap, pmap_curmaxkvaddr);
2176239268Sgonzo
2177239268Sgonzo	kernel_vm_end = pmap_curmaxkvaddr;
2178239268Sgonzo}
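
/*
 * Editorial example (hypothetical addresses): L1_S_SIZE is 1MB, so
 * growing from pmap_curmaxkvaddr == 0xc8000000 to addr == 0xc8280000
 * makes three pmap_grow_l2_bucket() calls, one per 1MB L1 slot that
 * newly gains an L2 page table, and leaves pmap_curmaxkvaddr at
 * 0xc8300000.
 */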
2179239268Sgonzo
2180254918Sraj/*
2181254918Sraj * Returns TRUE if the given page is mapped individually or as part of
2182254918Sraj * a 1MB section.  Otherwise, returns FALSE.
2183254918Sraj */
2184254918Srajboolean_t
2185254918Srajpmap_page_is_mapped(vm_page_t m)
2186254918Sraj{
2187254918Sraj	boolean_t rv;
2188239268Sgonzo
2189254918Sraj	if ((m->oflags & VPO_UNMANAGED) != 0)
2190254918Sraj		return (FALSE);
2191254918Sraj	rw_wlock(&pvh_global_lock);
2192254918Sraj	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
2193254918Sraj	    ((m->flags & PG_FICTITIOUS) == 0 &&
2194254918Sraj	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
2195254918Sraj	rw_wunlock(&pvh_global_lock);
2196254918Sraj	return (rv);
2197254918Sraj}
2198254918Sraj
2199239268Sgonzo/*
2200239268Sgonzo * Remove all pages from the specified address space;
2201239268Sgonzo * this aids process exit speeds.  Also, this code
2202239268Sgonzo * is special-cased for the current process only, but
2203239268Sgonzo * can have the more generic (and slightly slower)
2204239268Sgonzo * mode enabled.  This is much faster than pmap_remove
2205239268Sgonzo * in the case of running down an entire address space.
2206239268Sgonzo */
2207239268Sgonzovoid
2208239268Sgonzopmap_remove_pages(pmap_t pmap)
2209239268Sgonzo{
2210250634Sgber	struct pv_entry *pv;
2211250634Sgber	struct l2_bucket *l2b = NULL;
2212254918Sraj	struct pv_chunk *pc, *npc;
2213254918Sraj	struct md_page *pvh;
2214254918Sraj	pd_entry_t *pl1pd, l1pd;
2215250929Sgber	pt_entry_t *ptep;
2216254918Sraj	vm_page_t m, mt;
2217254918Sraj	vm_offset_t va;
2218250634Sgber	uint32_t inuse, bitmask;
2219250634Sgber	int allfree, bit, field, idx;
2220250634Sgber
2221250634Sgber	rw_wlock(&pvh_global_lock);
2222250634Sgber	PMAP_LOCK(pmap);
2223239268Sgonzo
2224250634Sgber	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2225250634Sgber		allfree = 1;
2226250634Sgber		for (field = 0; field < _NPCM; field++) {
2227250634Sgber			inuse = ~pc->pc_map[field] & pc_freemask[field];
2228250634Sgber			while (inuse != 0) {
2229250634Sgber				bit = ffs(inuse) - 1;
2230250634Sgber				bitmask = 1ul << bit;
2231250634Sgber				idx = field * sizeof(inuse) * NBBY + bit;
2232250634Sgber				pv = &pc->pc_pventry[idx];
2233254918Sraj				va = pv->pv_va;
2234250634Sgber				inuse &= ~bitmask;
2235250634Sgber				if (pv->pv_flags & PVF_WIRED) {
2236250634Sgber					/* Cannot remove wired pages now. */
2237250634Sgber					allfree = 0;
2238250634Sgber					continue;
2239250634Sgber				}
2240254918Sraj				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
2241254918Sraj				l1pd = *pl1pd;
2242254918Sraj				l2b = pmap_get_l2_bucket(pmap, va);
2243254918Sraj				if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
2244254918Sraj					pvh = pa_to_pvh(l1pd & L1_S_FRAME);
2245254918Sraj					TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
2246254918Sraj					if (TAILQ_EMPTY(&pvh->pv_list)) {
2247254918Sraj						m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
2248254918Sraj						KASSERT((vm_offset_t)m >= KERNBASE,
2249254918Sraj						    ("Trying to access non-existent page "
2250254918Sraj						     "va %x l1pd %x", trunc_1mpage(va), l1pd));
2251254918Sraj						for (mt = m; mt < &m[L2_PTE_NUM_TOTAL]; mt++) {
2252254918Sraj							if (TAILQ_EMPTY(&mt->md.pv_list))
2253254918Sraj								vm_page_aflag_clear(mt, PGA_WRITEABLE);
2254254918Sraj						}
2255254918Sraj					}
2256254918Sraj					if (l2b != NULL) {
2257254918Sraj						KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
2258254918Sraj						    ("pmap_remove_pages: l2_bucket occupancy error"));
2259254918Sraj						pmap_free_l2_bucket(pmap, l2b, L2_PTE_NUM_TOTAL);
2260254918Sraj					}
2261254918Sraj					pmap->pm_stats.resident_count -= L2_PTE_NUM_TOTAL;
2262254918Sraj					*pl1pd = 0;
2263254918Sraj					PTE_SYNC(pl1pd);
2264254918Sraj				} else {
2265254918Sraj					KASSERT(l2b != NULL,
2266254918Sraj					    ("No L2 bucket in pmap_remove_pages"));
2267254918Sraj					ptep = &l2b->l2b_kva[l2pte_index(va)];
2268254918Sraj					m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
2269254918Sraj					KASSERT((vm_offset_t)m >= KERNBASE,
2270254918Sraj					    ("Trying to access non-existent page "
2271254918Sraj					     "va %x pte %x", va, *ptep));
2272254918Sraj					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2273254918Sraj					if (TAILQ_EMPTY(&m->md.pv_list) &&
2274254918Sraj					    (m->flags & PG_FICTITIOUS) == 0) {
2275254918Sraj						pvh = pa_to_pvh(l2pte_pa(*ptep));
2276254918Sraj						if (TAILQ_EMPTY(&pvh->pv_list))
2277254918Sraj							vm_page_aflag_clear(m, PGA_WRITEABLE);
2278254918Sraj					}
2279254918Sraj					*ptep = 0;
2280254918Sraj					PTE_SYNC(ptep);
2281254918Sraj					pmap_free_l2_bucket(pmap, l2b, 1);
2282254918Sraj					pmap->pm_stats.resident_count--;
2283254918Sraj				}
2284250634Sgber
2285250634Sgber				/* Mark free */
2286250634Sgber				PV_STAT(pv_entry_frees++);
2287250634Sgber				PV_STAT(pv_entry_spare++);
2288250634Sgber				pv_entry_count--;
2289250634Sgber				pc->pc_map[field] |= bitmask;
2290250634Sgber			}
2291239268Sgonzo		}
2292250634Sgber		if (allfree) {
2293250634Sgber			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2294250634Sgber			pmap_free_pv_chunk(pc);
2295250634Sgber		}
2296250634Sgber
2297239268Sgonzo	}
2298250634Sgber
2299250634Sgber	rw_wunlock(&pvh_global_lock);
2300250634Sgber	cpu_tlb_flushID();
2301250634Sgber	cpu_cpwait();
2302250634Sgber	PMAP_UNLOCK(pmap);
2303239268Sgonzo}
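
/*
 * Editorial example of the chunk bitmap arithmetic above: pc_map words
 * are 32 bits (sizeof(inuse) * NBBY == 32) and a set bit means "free",
 * so a clear bit 5 in pc_map[2] shows up as bit 5 of inuse and yields
 * idx = 2 * 32 + 5 = 69, i.e. pv entry 69 of the chunk is live and gets
 * torn down.
 */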
2304239268Sgonzo
2305239268Sgonzo
2306239268Sgonzo/***************************************************
2307239268Sgonzo * Low level mapping routines.....
2308239268Sgonzo ***************************************************/
2309239268Sgonzo
2310239268Sgonzo#ifdef ARM_HAVE_SUPERSECTIONS
2311239268Sgonzo/* Map a super section into the KVA. */
2312239268Sgonzo
2313239268Sgonzovoid
2314239268Sgonzopmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
2315239268Sgonzo{
2316239268Sgonzo	pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
2317239268Sgonzo	    (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
2318254918Sraj	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) |
2319254918Sraj	    L1_S_DOM(PMAP_DOMAIN_KERNEL);
2320239268Sgonzo	struct l1_ttable *l1;
2321239268Sgonzo	vm_offset_t va0, va_end;
2322239268Sgonzo
2323239268Sgonzo	KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
2324239268Sgonzo	    ("Not a valid super section mapping"));
2325239268Sgonzo	if (flags & SECTION_CACHE)
2326239268Sgonzo		pd |= pte_l1_s_cache_mode;
2327239268Sgonzo	else if (flags & SECTION_PT)
2328239268Sgonzo		pd |= pte_l1_s_cache_mode_pt;
2329239268Sgonzo
2330239268Sgonzo	va0 = va & L1_SUP_FRAME;
2331239268Sgonzo	va_end = va + L1_SUP_SIZE;
2332239268Sgonzo	SLIST_FOREACH(l1, &l1_list, l1_link) {
2333239268Sgonzo		va = va0;
2334239268Sgonzo		for (; va < va_end; va += L1_S_SIZE) {
2335239268Sgonzo			l1->l1_kva[L1_IDX(va)] = pd;
2336239268Sgonzo			PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
2337239268Sgonzo		}
2338239268Sgonzo	}
2339239268Sgonzo}
2340239268Sgonzo#endif
2341239268Sgonzo
2342239268Sgonzo/* Map a section into the KVA. */
2343239268Sgonzo
2344239268Sgonzovoid
2345239268Sgonzopmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
2346239268Sgonzo{
2347239268Sgonzo	pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
2348254918Sraj	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) | L1_S_REF |
2349254918Sraj	    L1_S_DOM(PMAP_DOMAIN_KERNEL);
2350239268Sgonzo	struct l1_ttable *l1;
2351239268Sgonzo
2352239268Sgonzo	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
2353239268Sgonzo	    ("Not a valid section mapping"));
2354239268Sgonzo	if (flags & SECTION_CACHE)
2355239268Sgonzo		pd |= pte_l1_s_cache_mode;
2356239268Sgonzo	else if (flags & SECTION_PT)
2357239268Sgonzo		pd |= pte_l1_s_cache_mode_pt;
2358239268Sgonzo
2359239268Sgonzo	SLIST_FOREACH(l1, &l1_list, l1_link) {
2360239268Sgonzo		l1->l1_kva[L1_IDX(va)] = pd;
2361239268Sgonzo		PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
2362239268Sgonzo	}
2363266353Sian	cpu_tlb_flushID_SE(va);
2364266353Sian	cpu_cpwait();
2365239268Sgonzo}
2366239268Sgonzo
2367239268Sgonzo/*
2368239268Sgonzo * Make a temporary mapping for a physical address.  This is only intended
2369239268Sgonzo * to be used for panic dumps.
2370239268Sgonzo */
2371239268Sgonzovoid *
2372239268Sgonzopmap_kenter_temp(vm_paddr_t pa, int i)
2373239268Sgonzo{
2374239268Sgonzo	vm_offset_t va;
2375239268Sgonzo
2376239268Sgonzo	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2377239268Sgonzo	pmap_kenter(va, pa);
2378239268Sgonzo	return ((void *)crashdumpmap);
2379239268Sgonzo}
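
/*
 * Editorial note: the page is mapped at crashdumpmap + i * PAGE_SIZE,
 * but the returned pointer is the base of the crashdumpmap window, so a
 * hypothetical dump loop indexes the window itself:
 *
 *	base = pmap_kenter_temp(pa, i);
 *	p = (char *)base + i * PAGE_SIZE;
 */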
2380239268Sgonzo
2381239268Sgonzo/*
2382239268Sgonzo * add a wired page to the kva
2383239268Sgonzo * note that in order for the mapping to take effect, you
2384239268Sgonzo * should do a TLB invalidation after doing the pmap_kenter...
2385239268Sgonzo */
2386239268Sgonzostatic PMAP_INLINE void
2387239268Sgonzopmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
2388239268Sgonzo{
2389239268Sgonzo	struct l2_bucket *l2b;
2390250929Sgber	pt_entry_t *ptep;
2391239268Sgonzo	pt_entry_t opte;
2392239268Sgonzo
2393239268Sgonzo	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
2394239268Sgonzo	    (uint32_t) va, (uint32_t) pa));
2395239268Sgonzo
2396239268Sgonzo
2397239268Sgonzo	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2398239268Sgonzo	if (l2b == NULL)
2399239268Sgonzo		l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
2400239268Sgonzo	KASSERT(l2b != NULL, ("No L2 Bucket"));
2401239268Sgonzo
2402250929Sgber	ptep = &l2b->l2b_kva[l2pte_index(va)];
2403250929Sgber	opte = *ptep;
2404239268Sgonzo
2405269103Sian	if (flags & KENTER_CACHE)
2406269103Sian		*ptep = L2_S_PROTO | l2s_mem_types[PTE_CACHE] | pa | L2_S_REF;
2407269103Sian	else if (flags & KENTER_DEVICE)
2408269103Sian		*ptep = L2_S_PROTO | l2s_mem_types[PTE_DEVICE] | pa | L2_S_REF;
2409269103Sian	else
2410269103Sian		*ptep = L2_S_PROTO | l2s_mem_types[PTE_NOCACHE] | pa | L2_S_REF;
2411269103Sian
2412239268Sgonzo	if (flags & KENTER_CACHE) {
2413250929Sgber		pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE,
2414239268Sgonzo		    flags & KENTER_USER);
2415239268Sgonzo	} else {
2416250929Sgber		pmap_set_prot(ptep, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
2417239268Sgonzo		    0);
2418239268Sgonzo	}
2419239268Sgonzo
2420266353Sian	PTE_SYNC(ptep);
2421266353Sian	if (l2pte_valid(opte)) {
2422266353Sian		if (L2_S_EXECUTABLE(opte) || L2_S_EXECUTABLE(*ptep))
2423266353Sian			cpu_tlb_flushID_SE(va);
2424266353Sian		else
2425266353Sian			cpu_tlb_flushD_SE(va);
2426266353Sian	} else {
2427266353Sian		if (opte == 0)
2428266353Sian			l2b->l2b_occupancy++;
2429266353Sian	}
2430266357Sian	cpu_cpwait();
2431266353Sian
2432239268Sgonzo	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
2433250929Sgber	    (uint32_t) ptep, opte, *ptep));
2434239268Sgonzo}
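
/*
 * Editorial note on the flags above: KENTER_CACHE selects the normal
 * cacheable l2s_mem_types encoding, KENTER_DEVICE the device encoding,
 * and no flag the normal uncached one.  KENTER_USER additionally makes
 * the (cacheable) mapping user accessible; pmap_kenter_user() below
 * relies on that for pages that must be reachable from user mode.
 */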
2435239268Sgonzo
2436239268Sgonzovoid
2437239268Sgonzopmap_kenter(vm_offset_t va, vm_paddr_t pa)
2438239268Sgonzo{
2439239268Sgonzo	pmap_kenter_internal(va, pa, KENTER_CACHE);
2440239268Sgonzo}
2441239268Sgonzo
2442239268Sgonzovoid
2443239268Sgonzopmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
2444239268Sgonzo{
2445239268Sgonzo
2446239268Sgonzo	pmap_kenter_internal(va, pa, 0);
2447239268Sgonzo}
2448239268Sgonzo
2449239268Sgonzovoid
2450259364Sianpmap_kenter_device(vm_offset_t va, vm_paddr_t pa)
2451259364Sian{
2452259364Sian
2453269103Sian	pmap_kenter_internal(va, pa, KENTER_DEVICE);
2454259364Sian}
2455259364Sian
2456259364Sianvoid
2457239268Sgonzopmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
2458239268Sgonzo{
2459239268Sgonzo
2460239268Sgonzo	pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);
2461239268Sgonzo	/*
2462239268Sgonzo	 * Call pmap_fault_fixup now, to make sure we'll have no exception
2463239268Sgonzo	 * at the first use of the new address, or bad things will happen,
2464239268Sgonzo	 * as we use one of these addresses in the exception handlers.
2465239268Sgonzo	 */
2466239268Sgonzo	pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
2467239268Sgonzo}
2468239268Sgonzo
2469240983Salcvm_paddr_t
2470240983Salcpmap_kextract(vm_offset_t va)
2471240983Salc{
2472240983Salc
2473266050Sian	if (kernel_vm_end == 0)
2474266050Sian		return (0);
2475240983Salc	return (pmap_extract_locked(kernel_pmap, va));
2476240983Salc}
2477240983Salc
2478239268Sgonzo/*
2479239268Sgonzo * remove a page from the kernel pagetables
2480239268Sgonzo */
2481239268Sgonzovoid
2482239268Sgonzopmap_kremove(vm_offset_t va)
2483239268Sgonzo{
2484239268Sgonzo	struct l2_bucket *l2b;
2485250929Sgber	pt_entry_t *ptep, opte;
2486239268Sgonzo
2487239268Sgonzo	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2488239268Sgonzo	if (!l2b)
2489239268Sgonzo		return;
2490239268Sgonzo	KASSERT(l2b != NULL, ("No L2 Bucket"));
2491250929Sgber	ptep = &l2b->l2b_kva[l2pte_index(va)];
2492250929Sgber	opte = *ptep;
2493239268Sgonzo	if (l2pte_valid(opte)) {
2494239268Sgonzo		va = va & ~PAGE_MASK;
2495250929Sgber		*ptep = 0;
2496250929Sgber		PTE_SYNC(ptep);
2497266353Sian		if (L2_S_EXECUTABLE(opte))
2498266353Sian			cpu_tlb_flushID_SE(va);
2499266353Sian		else
2500266353Sian			cpu_tlb_flushD_SE(va);
2501266353Sian		cpu_cpwait();
2502239268Sgonzo	}
2503239268Sgonzo}
2504239268Sgonzo
2505239268Sgonzo
2506239268Sgonzo/*
2507239268Sgonzo *	Used to map a range of physical addresses into kernel
2508239268Sgonzo *	virtual address space.
2509239268Sgonzo *
2510239268Sgonzo *	The value passed in '*virt' is a suggested virtual address for
2511239268Sgonzo *	the mapping. Architectures which can support a direct-mapped
2512239268Sgonzo *	physical to virtual region can return the appropriate address
2513239268Sgonzo *	within that region, leaving '*virt' unchanged. Other
2514239268Sgonzo *	architectures should map the pages starting at '*virt' and
2515239268Sgonzo *	update '*virt' with the first usable address after the mapped
2516239268Sgonzo *	region.
2517239268Sgonzo */
2518239268Sgonzovm_offset_t
2519239268Sgonzopmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
2520239268Sgonzo{
2521239268Sgonzo	vm_offset_t sva = *virt;
2522239268Sgonzo	vm_offset_t va = sva;
2523239268Sgonzo
2524239268Sgonzo	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
2525239268Sgonzo	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
2526239268Sgonzo	    prot));
2527239268Sgonzo
2528239268Sgonzo	while (start < end) {
2529239268Sgonzo		pmap_kenter(va, start);
2530239268Sgonzo		va += PAGE_SIZE;
2531239268Sgonzo		start += PAGE_SIZE;
2532239268Sgonzo	}
2533239268Sgonzo	*virt = va;
2534239268Sgonzo	return (sva);
2535239268Sgonzo}
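
/*
 * Editorial usage sketch (hypothetical addresses): mapping a 64KB
 * physical region during early boot,
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva = pmap_map(&va, 0x80100000, 0x80110000,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 *
 * returns the start of the new mapping in sva and leaves va pointing
 * just past it.
 */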
2536239268Sgonzo
2537239268Sgonzo/*
2538239268Sgonzo * Add a list of wired pages to the kva;
2539239268Sgonzo * this routine is only used for temporary
2540239268Sgonzo * kernel mappings that do not need to have
2541239268Sgonzo * page modification or references recorded.
2542239268Sgonzo * Note that old mappings are simply written
2543239268Sgonzo * over.  The page *must* be wired.
2544239268Sgonzo */
2545239268Sgonzovoid
2546239268Sgonzopmap_qenter(vm_offset_t va, vm_page_t *m, int count)
2547239268Sgonzo{
2548239268Sgonzo	int i;
2549239268Sgonzo
2550239268Sgonzo	for (i = 0; i < count; i++) {
2551239268Sgonzo		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
2552239268Sgonzo		    KENTER_CACHE);
2553239268Sgonzo		va += PAGE_SIZE;
2554239268Sgonzo	}
2555239268Sgonzo}
2556239268Sgonzo
2557239268Sgonzo
2558239268Sgonzo/*
2559239268Sgonzo * this routine jerks page mappings from the
2560239268Sgonzo * kernel -- it is meant only for temporary mappings.
2561239268Sgonzo */
2562239268Sgonzovoid
2563239268Sgonzopmap_qremove(vm_offset_t va, int count)
2564239268Sgonzo{
2565239268Sgonzo	int i;
2566239268Sgonzo
2567239268Sgonzo	for (i = 0; i < count; i++) {
2568239268Sgonzo		if (vtophys(va))
2569239268Sgonzo			pmap_kremove(va);
2570239268Sgonzo
2571239268Sgonzo		va += PAGE_SIZE;
2572239268Sgonzo	}
2573239268Sgonzo}
2574239268Sgonzo
2575239268Sgonzo
2576239268Sgonzo/*
2577239268Sgonzo * pmap_object_init_pt preloads the ptes for a given object
2578239268Sgonzo * into the specified pmap.  This eliminates the blast of soft
2579239268Sgonzo * faults on process startup and immediately after an mmap.
2580239268Sgonzo */
2581239268Sgonzovoid
2582239268Sgonzopmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
2583239268Sgonzo    vm_pindex_t pindex, vm_size_t size)
2584239268Sgonzo{
2585239268Sgonzo
2586248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(object);
2587239268Sgonzo	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2588239268Sgonzo	    ("pmap_object_init_pt: non-device object"));
2589239268Sgonzo}
2590239268Sgonzo
2591239268Sgonzo
2592239268Sgonzo/*
2593239268Sgonzo *	pmap_is_prefaultable:
2594239268Sgonzo *
2595239268Sgonzo *	Return whether or not the specified virtual address is eligible
2596239268Sgonzo *	for prefault.
2597239268Sgonzo */
2598239268Sgonzoboolean_t
2599239268Sgonzopmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2600239268Sgonzo{
2601250929Sgber	pd_entry_t *pdep;
2602250929Sgber	pt_entry_t *ptep;
2603239268Sgonzo
2604250929Sgber	if (!pmap_get_pde_pte(pmap, addr, &pdep, &ptep))
2605239268Sgonzo		return (FALSE);
2606254918Sraj	KASSERT((pdep != NULL && (l1pte_section_p(*pdep) || ptep != NULL)),
2607254918Sraj	    ("Valid mapping but no pte ?"));
2608254918Sraj	if (*pdep != 0 && !l1pte_section_p(*pdep))
2609254918Sraj		if (*ptep == 0)
2610254918Sraj			return (TRUE);
2611239268Sgonzo	return (FALSE);
2612239268Sgonzo}
2613239268Sgonzo
2614239268Sgonzo/*
2615239268Sgonzo * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
2616239268Sgonzo * Returns TRUE if the mapping exists, else FALSE.
2617239268Sgonzo *
2618239268Sgonzo * NOTE: This function is only used by a couple of arm-specific modules.
2619239268Sgonzo * It is not safe to take any pmap locks here, since we could be right
2620239268Sgonzo * in the middle of debugging the pmap anyway...
2621239268Sgonzo *
2622239268Sgonzo * It is possible for this routine to return FALSE even though a valid
2623239268Sgonzo * mapping does exist. This is because we don't lock, so the metadata
2624239268Sgonzo * state may be inconsistent.
2625239268Sgonzo *
2626239268Sgonzo * NOTE: We can return a NULL *ptp in the case where the L1 pde is
2627239268Sgonzo * a "section" mapping.
2628239268Sgonzo */
2629239268Sgonzoboolean_t
2630250929Sgberpmap_get_pde_pte(pmap_t pmap, vm_offset_t va, pd_entry_t **pdp,
2631250929Sgber    pt_entry_t **ptp)
2632239268Sgonzo{
2633239268Sgonzo	struct l2_dtable *l2;
2634239268Sgonzo	pd_entry_t *pl1pd, l1pd;
2635239268Sgonzo	pt_entry_t *ptep;
2636239268Sgonzo	u_short l1idx;
2637239268Sgonzo
2638250929Sgber	if (pmap->pm_l1 == NULL)
2639239268Sgonzo		return (FALSE);
2640239268Sgonzo
2641239268Sgonzo	l1idx = L1_IDX(va);
2642250929Sgber	*pdp = pl1pd = &pmap->pm_l1->l1_kva[l1idx];
2643239268Sgonzo	l1pd = *pl1pd;
2644239268Sgonzo
2645239268Sgonzo	if (l1pte_section_p(l1pd)) {
2646239268Sgonzo		*ptp = NULL;
2647239268Sgonzo		return (TRUE);
2648239268Sgonzo	}
2649239268Sgonzo
2650250929Sgber	if (pmap->pm_l2 == NULL)
2651239268Sgonzo		return (FALSE);
2652239268Sgonzo
2653250929Sgber	l2 = pmap->pm_l2[L2_IDX(l1idx)];
2654239268Sgonzo
2655239268Sgonzo	if (l2 == NULL ||
2656239268Sgonzo	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
2657239268Sgonzo		return (FALSE);
2658239268Sgonzo	}
2659239268Sgonzo
2660239268Sgonzo	*ptp = &ptep[l2pte_index(va)];
2661239268Sgonzo	return (TRUE);
2662239268Sgonzo}
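
/*
 * Editorial usage sketch: callers must handle the section-mapping case,
 * where *ptp comes back NULL:
 *
 *	pd_entry_t *pdep;
 *	pt_entry_t *ptep;
 *
 *	if (pmap_get_pde_pte(pmap, va, &pdep, &ptep)) {
 *		if (ptep == NULL)
 *			... va is covered by the 1MB section in *pdep ...
 *		else
 *			... the small-page PTE is *ptep ...
 *	}
 */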
2663239268Sgonzo
2664239268Sgonzo/*
2665239268Sgonzo *      Routine:        pmap_remove_all
2666239268Sgonzo *      Function:
2667239268Sgonzo *              Removes this physical page from
2668239268Sgonzo *              all physical maps in which it resides.
2669239268Sgonzo *              Reflects back modify bits to the pager.
2670239268Sgonzo *
2671239268Sgonzo *      Notes:
2672239268Sgonzo *              Original versions of this routine were very
2673239268Sgonzo *              inefficient because they iteratively called
2674239268Sgonzo *              pmap_remove (slow...)
2675239268Sgonzo */
2676239268Sgonzovoid
2677239268Sgonzopmap_remove_all(vm_page_t m)
2678239268Sgonzo{
2679254918Sraj	struct md_page *pvh;
2680239268Sgonzo	pv_entry_t pv;
2681250634Sgber	pmap_t pmap;
2682239268Sgonzo	pt_entry_t *ptep;
2683239268Sgonzo	struct l2_bucket *l2b;
2684239268Sgonzo	boolean_t flush = FALSE;
2685250929Sgber	pmap_t curpmap;
2686250930Sgber	u_int is_exec = 0;
2687239268Sgonzo
2688254918Sraj	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2689254918Sraj	    ("pmap_remove_all: page %p is not managed", m));
2690240321Salc	rw_wlock(&pvh_global_lock);
2691254918Sraj	if ((m->flags & PG_FICTITIOUS) != 0)
2692254918Sraj		goto small_mappings;
2693254918Sraj	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2694254918Sraj	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2695254918Sraj		pmap = PV_PMAP(pv);
2696254918Sraj		PMAP_LOCK(pmap);
2697254918Sraj		pd_entry_t *pl1pd;
2698254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
2699254918Sraj		KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
2700254918Sraj		    ("pmap_remove_all: valid section mapping expected"));
2701254918Sraj		(void)pmap_demote_section(pmap, pv->pv_va);
2702254918Sraj		PMAP_UNLOCK(pmap);
2703254918Sraj	}
2704254918Srajsmall_mappings:
2705250929Sgber	curpmap = vmspace_pmap(curproc->p_vmspace);
2706239268Sgonzo	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2707250634Sgber		pmap = PV_PMAP(pv);
2708250929Sgber		if (flush == FALSE && (pmap == curpmap ||
2709250634Sgber		    pmap == pmap_kernel()))
2710239268Sgonzo			flush = TRUE;
2711239268Sgonzo
2712250634Sgber		PMAP_LOCK(pmap);
2713250634Sgber		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
2714239268Sgonzo		KASSERT(l2b != NULL, ("No l2 bucket"));
2715239268Sgonzo		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2716250930Sgber		is_exec |= PTE_BEEN_EXECD(*ptep);
2717239268Sgonzo		*ptep = 0;
2718250634Sgber		if (pmap_is_current(pmap))
2719239268Sgonzo			PTE_SYNC(ptep);
2720250634Sgber		pmap_free_l2_bucket(pmap, l2b, 1);
2721250634Sgber		pmap->pm_stats.resident_count--;
2722254531Sraj		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2723254531Sraj		if (pv->pv_flags & PVF_WIRED)
2724254531Sraj			pmap->pm_stats.wired_count--;
2725250634Sgber		pmap_free_pv_entry(pmap, pv);
2726250634Sgber		PMAP_UNLOCK(pmap);
2727239268Sgonzo	}
2728239268Sgonzo
2729239268Sgonzo	if (flush) {
2730250930Sgber		if (is_exec)
2731239268Sgonzo			cpu_tlb_flushID();
2732239268Sgonzo		else
2733239268Sgonzo			cpu_tlb_flushD();
2734266353Sian		cpu_cpwait();
2735239268Sgonzo	}
2736254531Sraj	vm_page_aflag_clear(m, PGA_WRITEABLE);
2737240321Salc	rw_wunlock(&pvh_global_lock);
2738239268Sgonzo}
2739239268Sgonzo
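/*
 * Change the memory attributes of the kernel virtual address range
 * [sva, sva + len).  Every mapped page in the range has its L2 cache
 * attribute bits cleared (making it effectively uncacheable), the caches are
 * written back and invalidated for the page, and its TLB entry is flushed.
 * As written, the "mode" argument is not consulted.  Returns 0 on success,
 * or EINVAL if a PTE within an existing L2 bucket is found to be empty;
 * ranges with no L2 bucket at all are silently skipped.
 */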
2740239268Sgonzoint
2741239268Sgonzopmap_change_attr(vm_offset_t sva, vm_size_t len, int mode)
2742239268Sgonzo{
2743239268Sgonzo	vm_offset_t base, offset, tmpva;
2744239268Sgonzo	vm_size_t size;
2745239268Sgonzo	struct l2_bucket *l2b;
2746239268Sgonzo	pt_entry_t *ptep, pte;
2747239268Sgonzo	vm_offset_t next_bucket;
2748239268Sgonzo
2749239268Sgonzo	PMAP_LOCK(kernel_pmap);
2750239268Sgonzo
2751239268Sgonzo	base = trunc_page(sva);
2752239268Sgonzo	offset = sva & PAGE_MASK;
2753239268Sgonzo	size = roundup(offset + len, PAGE_SIZE);
2754239268Sgonzo
2755239268Sgonzo#ifdef checkit
2756239268Sgonzo	/*
2757239268Sgonzo	 * Only supported on kernel virtual addresses, including the direct
2758239268Sgonzo	 * map but excluding the recursive map.
2759239268Sgonzo	 */
2760243109Scognet	if (base < DMAP_MIN_ADDRESS) {
2761243109Scognet		PMAP_UNLOCK(kernel_pmap);
2762239268Sgonzo		return (EINVAL);
2763243109Scognet	}
2764239268Sgonzo#endif
2765239268Sgonzo	for (tmpva = base; tmpva < base + size; ) {
2766239268Sgonzo		next_bucket = L2_NEXT_BUCKET(tmpva);
2767239268Sgonzo		if (next_bucket > base + size)
2768239268Sgonzo			next_bucket = base + size;
2769239268Sgonzo
2770239268Sgonzo		l2b = pmap_get_l2_bucket(kernel_pmap, tmpva);
2771239268Sgonzo		if (l2b == NULL) {
2772239268Sgonzo			tmpva = next_bucket;
2773239268Sgonzo			continue;
2774239268Sgonzo		}
2775239268Sgonzo
2776239268Sgonzo		ptep = &l2b->l2b_kva[l2pte_index(tmpva)];
2777239268Sgonzo
2778243109Scognet		if (*ptep == 0) {
2779243109Scognet			PMAP_UNLOCK(kernel_pmap);
2780239268Sgonzo			return (EINVAL);
2781243109Scognet		}
2782239268Sgonzo
2783239268Sgonzo		pte = *ptep &~ L2_S_CACHE_MASK;
2784239268Sgonzo		cpu_idcache_wbinv_range(tmpva, PAGE_SIZE);
2785245146Sgonzo		pmap_l2cache_wbinv_range(tmpva, pte & L2_S_FRAME, PAGE_SIZE);
2786239268Sgonzo		*ptep = pte;
2787239268Sgonzo		cpu_tlb_flushID_SE(tmpva);
2788266353Sian		cpu_cpwait();
2789239268Sgonzo
2790239268Sgonzo		dprintf("%s: for va:%x ptep:%x pte:%x\n",
2791239268Sgonzo		    __func__, tmpva, (uint32_t)ptep, pte);
2792239268Sgonzo		tmpva += PAGE_SIZE;
2793239268Sgonzo	}
2794239268Sgonzo
2795239268Sgonzo	PMAP_UNLOCK(kernel_pmap);
2796239268Sgonzo
2797239268Sgonzo	return (0);
2798239268Sgonzo}
2799239268Sgonzo
2800239268Sgonzo/*
2801239268Sgonzo *	Set the physical protection on the
2802239268Sgonzo *	specified range of this map as requested.
2803239268Sgonzo */
2804239268Sgonzovoid
2805250929Sgberpmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2806239268Sgonzo{
2807239268Sgonzo	struct l2_bucket *l2b;
2808254918Sraj	struct md_page *pvh;
2809254918Sraj	struct pv_entry *pve;
2810254918Sraj	pd_entry_t *pl1pd, l1pd;
2811239268Sgonzo	pt_entry_t *ptep, pte;
2812239268Sgonzo	vm_offset_t next_bucket;
2813250930Sgber	u_int is_exec, is_refd;
2814239268Sgonzo	int flush;
2815239268Sgonzo
2816239268Sgonzo	if ((prot & VM_PROT_READ) == 0) {
2817250929Sgber		pmap_remove(pmap, sva, eva);
2818239268Sgonzo		return;
2819239268Sgonzo	}
2820239268Sgonzo
2821239268Sgonzo	if (prot & VM_PROT_WRITE) {
2822239268Sgonzo		/*
2823239268Sgonzo		 * If this is a read->write transition, just ignore it and let
2824239268Sgonzo		 * vm_fault() take care of it later.
2825239268Sgonzo		 */
2826239268Sgonzo		return;
2827239268Sgonzo	}
2828239268Sgonzo
2829240321Salc	rw_wlock(&pvh_global_lock);
2830250929Sgber	PMAP_LOCK(pmap);
2831239268Sgonzo
2832239268Sgonzo	/*
2833239268Sgonzo	 * OK, at this point, we know we're doing a write-protect operation.
2834239268Sgonzo	 * If the pmap is active, write-back the range.
2835239268Sgonzo	 */
2836239268Sgonzo
2837239268Sgonzo	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
2838250930Sgber	is_exec = is_refd = 0;
2839239268Sgonzo
2840239268Sgonzo	while (sva < eva) {
2841239268Sgonzo		next_bucket = L2_NEXT_BUCKET(sva);
2842254918Sraj		/*
2843254918Sraj		 * Check for large page.
2844254918Sraj		 */
2845254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
2846254918Sraj		l1pd = *pl1pd;
2847254918Sraj		if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
2848254918Sraj			KASSERT(pmap != pmap_kernel(),
2849254918Sraj			    ("pmap_protect: trying to modify "
2850254918Sraj			    "kernel section protections"));
2851254918Sraj			/*
2852254918Sraj			 * Are we protecting the entire large page? If not,
2853254918Sraj			 * demote the mapping and fall through.
2854254918Sraj			 */
2855254918Sraj			if (sva + L1_S_SIZE == L2_NEXT_BUCKET(sva) &&
2856254918Sraj			    eva >= L2_NEXT_BUCKET(sva)) {
2857254918Sraj				l1pd &= ~(L1_S_PROT_MASK | L1_S_XN);
2858254918Sraj				if (!(prot & VM_PROT_EXECUTE))
2859254918Sraj					l1pd |= L1_S_XN;
2860254918Sraj				/*
2861254918Sraj				 * At this point we are always setting
2862254918Sraj				 * the write-protect bit.
2863254918Sraj				 */
2864254918Sraj				l1pd |= L1_S_APX;
2865254918Sraj				/* All managed superpages are user pages. */
2866254918Sraj				l1pd |= L1_S_PROT_U;
2867254918Sraj				*pl1pd = l1pd;
2868254918Sraj				PTE_SYNC(pl1pd);
2869254918Sraj				pvh = pa_to_pvh(l1pd & L1_S_FRAME);
2870254918Sraj				pve = pmap_find_pv(pvh, pmap,
2871254918Sraj				    trunc_1mpage(sva));
2872254918Sraj				pve->pv_flags &= ~PVF_WRITE;
2873254918Sraj				sva = next_bucket;
2874254918Sraj				continue;
2875254918Sraj			} else if (!pmap_demote_section(pmap, sva)) {
2876254918Sraj				/* The large page mapping was destroyed. */
2877254918Sraj				sva = next_bucket;
2878254918Sraj				continue;
2879254918Sraj			}
2880254918Sraj		}
2881239268Sgonzo		if (next_bucket > eva)
2882239268Sgonzo			next_bucket = eva;
2883250929Sgber		l2b = pmap_get_l2_bucket(pmap, sva);
2884239268Sgonzo		if (l2b == NULL) {
2885239268Sgonzo			sva = next_bucket;
2886239268Sgonzo			continue;
2887239268Sgonzo		}
2888239268Sgonzo
2889239268Sgonzo		ptep = &l2b->l2b_kva[l2pte_index(sva)];
2890239268Sgonzo
2891239268Sgonzo		while (sva < next_bucket) {
2892239268Sgonzo			if ((pte = *ptep) != 0 && L2_S_WRITABLE(pte)) {
2893250929Sgber				struct vm_page *m;
2894239268Sgonzo
2895250929Sgber				m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
2896250929Sgber				pmap_set_prot(ptep, prot,
2897250929Sgber				    !(pmap == pmap_kernel()));
2898239268Sgonzo				PTE_SYNC(ptep);
2899239268Sgonzo
2900250930Sgber				pmap_modify_pv(m, pmap, sva, PVF_WRITE, 0);
2901239268Sgonzo
2902239268Sgonzo				if (flush >= 0) {
2903239268Sgonzo					flush++;
2904250930Sgber					is_exec |= PTE_BEEN_EXECD(pte);
2905250930Sgber					is_refd |= PTE_BEEN_REFD(pte);
2906250930Sgber				} else {
2907250930Sgber					if (PTE_BEEN_EXECD(pte))
2908250930Sgber						cpu_tlb_flushID_SE(sva);
2909250930Sgber					else if (PTE_BEEN_REFD(pte))
2910250930Sgber						cpu_tlb_flushD_SE(sva);
2911250930Sgber				}
2912239268Sgonzo			}
2913239268Sgonzo
2914239268Sgonzo			sva += PAGE_SIZE;
2915239268Sgonzo			ptep++;
2916239268Sgonzo		}
2917239268Sgonzo	}
2918239268Sgonzo
2919239268Sgonzo
2920239268Sgonzo	if (flush) {
2921250930Sgber		if (is_exec)
2922239268Sgonzo			cpu_tlb_flushID();
2923239268Sgonzo		else
2924250930Sgber		if (is_refd)
2925239268Sgonzo			cpu_tlb_flushD();
2926266353Sian		cpu_cpwait();
2927239268Sgonzo	}
2928240321Salc	rw_wunlock(&pvh_global_lock);
2929239268Sgonzo
2930250929Sgber	PMAP_UNLOCK(pmap);
2931239268Sgonzo}
2932239268Sgonzo
2933239268Sgonzo
2934239268Sgonzo/*
2935239268Sgonzo *	Insert the given physical page (p) at
2936239268Sgonzo *	the specified virtual address (v) in the
2937239268Sgonzo *	target physical map with the protection requested.
2938239268Sgonzo *
2939239268Sgonzo *	If specified, the page will be wired down, meaning
2940239268Sgonzo *	that the related pte can not be reclaimed.
2941239268Sgonzo *
2942239268Sgonzo *	NB:  This is the only routine which MAY NOT lazy-evaluate
2943239268Sgonzo *	or lose information.  That is, this routine must actually
2944239268Sgonzo *	insert this page into the given map NOW.
2945239268Sgonzo */
2946239268Sgonzo
2947270439Skibint
2948270439Skibpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2949270439Skib    u_int flags, int8_t psind __unused)
2950239268Sgonzo{
2951266199Sian	struct l2_bucket *l2b;
2952270439Skib	int rv;
2953239268Sgonzo
2954240321Salc	rw_wlock(&pvh_global_lock);
2955239268Sgonzo	PMAP_LOCK(pmap);
2956270439Skib	rv = pmap_enter_locked(pmap, va, m, prot, flags);
2957270439Skib	if (rv == KERN_SUCCESS) {
2958270439Skib		/*
2959270439Skib		 * If both the l2b_occupancy and the reservation are fully
2960270439Skib		 * populated, then attempt promotion.
2961270439Skib		 */
2962270439Skib		l2b = pmap_get_l2_bucket(pmap, va);
2963270439Skib		if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
2964270439Skib		    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
2965270439Skib		    vm_reserv_level_iffullpop(m) == 0)
2966270439Skib			pmap_promote_section(pmap, va);
2967270439Skib	}
2968239268Sgonzo	PMAP_UNLOCK(pmap);
2969240321Salc	rw_wunlock(&pvh_global_lock);
2970270439Skib	return (rv);
2971239268Sgonzo}
2972239268Sgonzo
2973239268Sgonzo/*
2974240803Salc *	The pvh global and pmap locks must be held.
2975239268Sgonzo */
2976270439Skibstatic int
2977270439Skibpmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2978270439Skib    u_int flags)
2979239268Sgonzo{
2980239268Sgonzo	struct l2_bucket *l2b = NULL;
2981250929Sgber	struct vm_page *om;
2982239268Sgonzo	struct pv_entry *pve = NULL;
2983254918Sraj	pd_entry_t *pl1pd, l1pd;
2984239268Sgonzo	pt_entry_t *ptep, npte, opte;
2985239268Sgonzo	u_int nflags;
2986250930Sgber	u_int is_exec, is_refd;
2987239268Sgonzo	vm_paddr_t pa;
2988239268Sgonzo	u_char user;
2989239268Sgonzo
2990239268Sgonzo	PMAP_ASSERT_LOCKED(pmap);
2991240321Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2992239268Sgonzo	if (va == vector_page) {
2993239268Sgonzo		pa = systempage.pv_pa;
2994239268Sgonzo		m = NULL;
2995239268Sgonzo	} else {
2996270439Skib		if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2997270439Skib			VM_OBJECT_ASSERT_LOCKED(m->object);
2998239268Sgonzo		pa = VM_PAGE_TO_PHYS(m);
2999239268Sgonzo	}
3000239268Sgonzo
3001254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3002266199Sian	if ((va < VM_MAXUSER_ADDRESS) &&
3003266199Sian	    (*pl1pd & L1_TYPE_MASK) == L1_S_PROTO) {
3004266199Sian		(void)pmap_demote_section(pmap, va);
3005266199Sian	}
3006254918Sraj
3007239268Sgonzo	user = 0;
3008239268Sgonzo	/*
3009239268Sgonzo	 * Make sure userland mappings get the right permissions
3010239268Sgonzo	 */
3011239268Sgonzo	if (pmap != pmap_kernel() && va != vector_page)
3012239268Sgonzo		user = 1;
3013239268Sgonzo
3014239268Sgonzo	nflags = 0;
3015239268Sgonzo
3016239268Sgonzo	if (prot & VM_PROT_WRITE)
3017239268Sgonzo		nflags |= PVF_WRITE;
3018270439Skib	if ((flags & PMAP_ENTER_WIRED) != 0)
3019239268Sgonzo		nflags |= PVF_WIRED;
3020239268Sgonzo
3021250929Sgber	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
3022270439Skib	    "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
3023270439Skib	    prot, flags));
3024239268Sgonzo
3025239268Sgonzo	if (pmap == pmap_kernel()) {
3026239268Sgonzo		l2b = pmap_get_l2_bucket(pmap, va);
3027239268Sgonzo		if (l2b == NULL)
3028239268Sgonzo			l2b = pmap_grow_l2_bucket(pmap, va);
3029239268Sgonzo	} else {
3030239268Sgonzodo_l2b_alloc:
3031239268Sgonzo		l2b = pmap_alloc_l2_bucket(pmap, va);
3032239268Sgonzo		if (l2b == NULL) {
3033270439Skib			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
3034239268Sgonzo				PMAP_UNLOCK(pmap);
3035240321Salc				rw_wunlock(&pvh_global_lock);
3036239268Sgonzo				VM_WAIT;
3037240321Salc				rw_wlock(&pvh_global_lock);
3038239268Sgonzo				PMAP_LOCK(pmap);
3039239268Sgonzo				goto do_l2b_alloc;
3040239268Sgonzo			}
3041270439Skib			return (KERN_RESOURCE_SHORTAGE);
3042239268Sgonzo		}
3043239268Sgonzo	}
3044239268Sgonzo
3045266199Sian	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3046266199Sian	if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
3047266199Sian		panic("pmap_enter: attempt to enter on 1MB page, va: %#x", va);
3048266199Sian
3049239268Sgonzo	ptep = &l2b->l2b_kva[l2pte_index(va)];
3050239268Sgonzo
3051239268Sgonzo	opte = *ptep;
3052239268Sgonzo	npte = pa;
3053250930Sgber	is_exec = is_refd = 0;
3054250930Sgber
3055239268Sgonzo	if (opte) {
3056250931Sgber		if (l2pte_pa(opte) == pa) {
3057250931Sgber			/*
3058250931Sgber			 * We're changing the attrs of an existing mapping.
3059250931Sgber			 */
3060250931Sgber			if (m != NULL)
3061250931Sgber				pmap_modify_pv(m, pmap, va,
3062250931Sgber				    PVF_WRITE | PVF_WIRED, nflags);
3063250931Sgber			is_exec |= PTE_BEEN_EXECD(opte);
3064250931Sgber			is_refd |= PTE_BEEN_REFD(opte);
3065250931Sgber			goto validate;
3066250931Sgber		}
3067250931Sgber		if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
3068250931Sgber			/*
3069250931Sgber			 * Replacing an existing mapping with a new one.
3070250931Sgber			 * It is part of our managed memory so we
3071250931Sgber			 * must remove it from the PV list
3072250931Sgber			 */
3073250931Sgber			if ((pve = pmap_remove_pv(om, pmap, va))) {
3074250931Sgber				is_exec |= PTE_BEEN_EXECD(opte);
3075250931Sgber				is_refd |= PTE_BEEN_REFD(opte);
3076250931Sgber
3077250931Sgber				if (m && ((m->oflags & VPO_UNMANAGED)))
3078250931Sgber					pmap_free_pv_entry(pmap, pve);
3079250931Sgber			}
3080250931Sgber		}
3081239268Sgonzo
3082239268Sgonzo	} else {
3083239268Sgonzo		/*
3084250931Sgber		 * Keep the stats up to date
3085239268Sgonzo		 */
3086250931Sgber		l2b->l2b_occupancy++;
3087250931Sgber		pmap->pm_stats.resident_count++;
3088239268Sgonzo	}
3089239268Sgonzo
3090250931Sgber	/*
3091250931Sgber	 * Enter on the PV list if part of our managed memory.
3092250931Sgber	 */
3093250931Sgber	if ((m && !(m->oflags & VPO_UNMANAGED))) {
3094250931Sgber		if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
3095250931Sgber			panic("pmap_enter: no pv entries");
3096250931Sgber
3097250931Sgber		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3098250931Sgber		("pmap_enter: managed mapping within the clean submap"));
3099250931Sgber		KASSERT(pve != NULL, ("No pv"));
3100250931Sgber		pmap_enter_pv(m, pve, pmap, va, nflags);
3101250931Sgber	}
3102250931Sgber
3103250931Sgbervalidate:
3104250928Sgber	/* Make the new PTE valid */
3105250928Sgber	npte |= L2_S_PROTO;
3106250928Sgber#ifdef SMP
3107250928Sgber	npte |= L2_SHARED;
3108250928Sgber#endif
3109250928Sgber	/* Set defaults first - kernel read access */
3110250297Sgber	npte |= L2_APX;
3111239268Sgonzo	npte |= L2_S_PROT_R;
3112250931Sgber	/* Set "referenced" flag */
3113250931Sgber	npte |= L2_S_REF;
3114250928Sgber
3115250928Sgber	/* Now tune APs as desired */
3116250297Sgber	if (user)
3117250297Sgber		npte |= L2_S_PROT_U;
3118250931Sgber	/*
3119250931Sgber	 * If this is not a vector_page
3120250931Sgber	 * then continue setting mapping parameters
3121250931Sgber	 */
3122250931Sgber	if (m != NULL) {
3123266058Sian		if ((m->oflags & VPO_UNMANAGED) == 0) {
3124266058Sian			if (prot & (VM_PROT_ALL)) {
3125250931Sgber				vm_page_aflag_set(m, PGA_REFERENCED);
3126266058Sian			} else {
3127266058Sian				/*
3128266058Sian				 * Need to do page referenced emulation.
3129266058Sian				 */
3130266058Sian				npte &= ~L2_S_REF;
3131266058Sian			}
3132250928Sgber		}
3133239268Sgonzo
3134250931Sgber		if (prot & VM_PROT_WRITE) {
3135250931Sgber			if ((m->oflags & VPO_UNMANAGED) == 0) {
3136250931Sgber				vm_page_aflag_set(m, PGA_WRITEABLE);
3137250931Sgber				/*
3138266058Sian				 * XXX: Skip modified bit emulation for now.
3139266058Sian				 *	The emulation reveals problems
3140266058Sian				 *	that result in random failures
3141266058Sian				 *	during memory allocation on some
3142266058Sian				 *	platforms.
3143266058Sian				 *	Therefore, the page is marked RW
3144266058Sian				 *	immediately.
3145250931Sgber				 */
3146266058Sian				npte &= ~(L2_APX);
3147266058Sian				vm_page_dirty(m);
3148266058Sian			} else
3149266058Sian				npte &= ~(L2_APX);
3150239268Sgonzo		}
3151250931Sgber		if (!(prot & VM_PROT_EXECUTE))
3152250931Sgber			npte |= L2_XN;
3153239268Sgonzo
3154250931Sgber		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
3155250931Sgber			npte |= pte_l2_s_cache_mode;
3156239268Sgonzo	}
3157239268Sgonzo
3158239268Sgonzo	CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",
3159239268Sgonzo	    pmap, va, prot, opte, npte);
3160239268Sgonzo	/*
3161239268Sgonzo	 * If this is just a wiring change, the two PTEs will be
3162239268Sgonzo	 * identical, so there's no need to update the page table.
3163239268Sgonzo	 */
3164239268Sgonzo	if (npte != opte) {
3165239268Sgonzo		boolean_t is_cached = pmap_is_current(pmap);
3166239268Sgonzo
3167239268Sgonzo		*ptep = npte;
3168239268Sgonzo		PTE_SYNC(ptep);
3169239268Sgonzo		if (is_cached) {
3170239268Sgonzo			/*
3171239268Sgonzo			 * We only need to frob the cache/tlb if this pmap
3172239268Sgonzo			 * is current
3173239268Sgonzo			 */
3174239268Sgonzo			if (L1_IDX(va) != L1_IDX(vector_page) &&
3175239268Sgonzo			    l2pte_valid(npte)) {
3176239268Sgonzo				/*
3177239268Sgonzo				 * This mapping is likely to be accessed as
3178239268Sgonzo				 * soon as we return to userland. Fix up the
3179239268Sgonzo				 * L1 entry to avoid taking another
3180239268Sgonzo				 * page/domain fault.
3181239268Sgonzo				 */
3182250929Sgber				l1pd = l2b->l2b_phys |
3183250929Sgber				    L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
3184239268Sgonzo				if (*pl1pd != l1pd) {
3185239268Sgonzo					*pl1pd = l1pd;
3186239268Sgonzo					PTE_SYNC(pl1pd);
3187239268Sgonzo				}
3188239268Sgonzo			}
3189239268Sgonzo		}
3190239268Sgonzo
3191250930Sgber		if (is_exec)
3192239268Sgonzo			cpu_tlb_flushID_SE(va);
3193250930Sgber		else if (is_refd)
3194239268Sgonzo			cpu_tlb_flushD_SE(va);
3195266353Sian		cpu_cpwait();
3196239268Sgonzo	}
3197245146Sgonzo
3198245146Sgonzo	if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
3199245146Sgonzo		cpu_icache_sync_range(va, PAGE_SIZE);
3200270439Skib	return (KERN_SUCCESS);
3201239268Sgonzo}
3202239268Sgonzo
3203239268Sgonzo/*
3204239268Sgonzo * Maps a sequence of resident pages belonging to the same object.
3205239268Sgonzo * The sequence begins with the given page m_start.  This page is
3206239268Sgonzo * mapped at the given virtual address start.  Each subsequent page is
3207239268Sgonzo * mapped at a virtual address that is offset from start by the same
3208239268Sgonzo * amount as the page is offset from m_start within the object.  The
3209239268Sgonzo * last page in the sequence is the page with the largest offset from
3210239268Sgonzo * m_start that can be mapped at a virtual address less than the given
3211239268Sgonzo * virtual address end.  Not every virtual page between start and end
3212239268Sgonzo * is mapped; only those for which a resident page exists with the
3213239268Sgonzo * corresponding offset from m_start are mapped.
3214239268Sgonzo */
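/*
 * For each candidate virtual address, a whole 1MB section is entered via
 * pmap_enter_section() when the va is section-aligned with at least 1MB left
 * before "end", the page has psind == 1 (a fully populated reservation), and
 * superpages are enabled (sp_enabled); otherwise the routine falls back to
 * pmap_enter_locked() with PMAP_ENTER_NOSLEEP for the individual 4KB page.
 */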
3215239268Sgonzovoid
3216239268Sgonzopmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3217239268Sgonzo    vm_page_t m_start, vm_prot_t prot)
3218239268Sgonzo{
3219254918Sraj	vm_offset_t va;
3220239268Sgonzo	vm_page_t m;
3221239268Sgonzo	vm_pindex_t diff, psize;
3222239268Sgonzo
3223250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
3224250884Sattilio
3225239268Sgonzo	psize = atop(end - start);
3226239268Sgonzo	m = m_start;
3227270439Skib	prot &= VM_PROT_READ | VM_PROT_EXECUTE;
3228240321Salc	rw_wlock(&pvh_global_lock);
3229239268Sgonzo	PMAP_LOCK(pmap);
3230239268Sgonzo	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3231254918Sraj		va = start + ptoa(diff);
3232254918Sraj		if ((va & L1_S_OFFSET) == 0 && L2_NEXT_BUCKET(va) <= end &&
3233269072Skib		    m->psind == 1 && sp_enabled &&
3234254918Sraj		    pmap_enter_section(pmap, va, m, prot))
3235254918Sraj			m = &m[L1_S_SIZE / PAGE_SIZE - 1];
3236254918Sraj		else
3237270439Skib			pmap_enter_locked(pmap, va, m, prot,
3238270439Skib			    PMAP_ENTER_NOSLEEP);
3239239268Sgonzo		m = TAILQ_NEXT(m, listq);
3240239268Sgonzo	}
3241239268Sgonzo	PMAP_UNLOCK(pmap);
3242240321Salc	rw_wunlock(&pvh_global_lock);
3243239268Sgonzo}
3244239268Sgonzo
3245239268Sgonzo/*
3246239268Sgonzo * this code makes some *MAJOR* assumptions:
3247239268Sgonzo * 1. Current pmap & pmap exists.
3248239268Sgonzo * 2. Not wired.
3249239268Sgonzo * 3. Read access.
3250239268Sgonzo * 4. No page table pages.
3251239268Sgonzo * but is *MUCH* faster than pmap_enter...
3252239268Sgonzo */
3253239268Sgonzo
3254239268Sgonzovoid
3255239268Sgonzopmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3256239268Sgonzo{
3257239268Sgonzo
3258270439Skib	prot &= VM_PROT_READ | VM_PROT_EXECUTE;
3259240321Salc	rw_wlock(&pvh_global_lock);
3260239268Sgonzo	PMAP_LOCK(pmap);
3261270439Skib	pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
3262239268Sgonzo	PMAP_UNLOCK(pmap);
3263240321Salc	rw_wunlock(&pvh_global_lock);
3264239268Sgonzo}
3265239268Sgonzo
3266239268Sgonzo/*
3267270920Skib *	Clear the wired attribute from the mappings for the specified range of
3268270920Skib *	addresses in the given pmap.  Every valid mapping within that range
3269270920Skib *	must have the wired attribute set.  In contrast, invalid mappings
3270270920Skib *	cannot have the wired attribute set, so they are ignored.
3271270920Skib *
3272270920Skib *	XXX Wired mappings of unmanaged pages cannot be counted by this pmap
3273270920Skib *	implementation.
3274239268Sgonzo */
3275239268Sgonzovoid
3276270920Skibpmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3277239268Sgonzo{
3278239268Sgonzo	struct l2_bucket *l2b;
3279254918Sraj	struct md_page *pvh;
3280270920Skib	pd_entry_t l1pd;
3281239268Sgonzo	pt_entry_t *ptep, pte;
3282270920Skib	pv_entry_t pv;
3283270920Skib	vm_offset_t next_bucket;
3284270920Skib	vm_paddr_t pa;
3285250929Sgber	vm_page_t m;
3286270920Skib
3287240321Salc	rw_wlock(&pvh_global_lock);
3288239268Sgonzo	PMAP_LOCK(pmap);
3289270920Skib	while (sva < eva) {
3290270920Skib		next_bucket = L2_NEXT_BUCKET(sva);
3291270920Skib		l1pd = pmap->pm_l1->l1_kva[L1_IDX(sva)];
3292270920Skib		if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
3293270920Skib			pa = l1pd & L1_S_FRAME;
3294270920Skib			m = PHYS_TO_VM_PAGE(pa);
3295270920Skib			KASSERT(m != NULL && (m->oflags & VPO_UNMANAGED) == 0,
3296270920Skib			    ("pmap_unwire: unmanaged 1mpage %p", m));
3297270920Skib			pvh = pa_to_pvh(pa);
3298270920Skib			pv = pmap_find_pv(pvh, pmap, trunc_1mpage(sva));
3299270920Skib			if ((pv->pv_flags & PVF_WIRED) == 0)
3300270920Skib				panic("pmap_unwire: pv %p isn't wired", pv);
3301270920Skib
3302270920Skib			/*
3303270920Skib			 * Are we unwiring the entire large page? If not,
3304270920Skib			 * demote the mapping and fall through.
3305270920Skib			 */
3306270920Skib			if (sva + L1_S_SIZE == next_bucket &&
3307270920Skib			    eva >= next_bucket) {
3308270920Skib				pv->pv_flags &= ~PVF_WIRED;
3309270920Skib				pmap->pm_stats.wired_count -= L2_PTE_NUM_TOTAL;
3310270920Skib				sva = next_bucket;
3311270920Skib				continue;
3312270920Skib			} else if (!pmap_demote_section(pmap, sva))
3313270920Skib				panic("pmap_unwire: demotion failed");
3314270920Skib		}
3315270920Skib		if (next_bucket > eva)
3316270920Skib			next_bucket = eva;
3317270920Skib		l2b = pmap_get_l2_bucket(pmap, sva);
3318270920Skib		if (l2b == NULL) {
3319270920Skib			sva = next_bucket;
3320270920Skib			continue;
3321270920Skib		}
3322270920Skib		for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket;
3323270920Skib		    sva += PAGE_SIZE, ptep++) {
3324270920Skib			if ((pte = *ptep) == 0 ||
3325270920Skib			    (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL ||
3326270920Skib			    (m->oflags & VPO_UNMANAGED) != 0)
3327270920Skib				continue;
3328270920Skib			pv = pmap_find_pv(&m->md, pmap, sva);
3329270920Skib			if ((pv->pv_flags & PVF_WIRED) == 0)
3330270920Skib				panic("pmap_unwire: pv %p isn't wired", pv);
3331270920Skib			pv->pv_flags &= ~PVF_WIRED;
3332270920Skib			pmap->pm_stats.wired_count--;
3333270920Skib		}
3334254918Sraj	}
3335240321Salc	rw_wunlock(&pvh_global_lock);
3336270920Skib	PMAP_UNLOCK(pmap);
3337239268Sgonzo}
3338239268Sgonzo
3339239268Sgonzo
3340239268Sgonzo/*
3341239268Sgonzo *	Copy the range specified by src_addr/len
3342239268Sgonzo *	from the source map to the range dst_addr/len
3343239268Sgonzo *	in the destination map.
3344239268Sgonzo *
3345239268Sgonzo *	This routine is only advisory and need not do anything.
3346239268Sgonzo */
3347239268Sgonzovoid
3348239268Sgonzopmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
3349239268Sgonzo    vm_size_t len, vm_offset_t src_addr)
3350239268Sgonzo{
3351239268Sgonzo}
3352239268Sgonzo
3353239268Sgonzo
3354239268Sgonzo/*
3355239268Sgonzo *	Routine:	pmap_extract
3356239268Sgonzo *	Function:
3357239268Sgonzo *		Extract the physical page address associated
3358239268Sgonzo *		with the given map/virtual_address pair.
3359239268Sgonzo */
3360239268Sgonzovm_paddr_t
3361240983Salcpmap_extract(pmap_t pmap, vm_offset_t va)
3362239268Sgonzo{
3363240983Salc	vm_paddr_t pa;
3364240983Salc
3365266050Sian	if (kernel_vm_end != 0)
3366266050Sian		PMAP_LOCK(pmap);
3367240983Salc	pa = pmap_extract_locked(pmap, va);
3368266050Sian	if (kernel_vm_end != 0)
3369266050Sian		PMAP_UNLOCK(pmap);
3370240983Salc	return (pa);
3371240983Salc}
3372240983Salc
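/*
 * Locked variant of pmap_extract().  The L1 descriptor is examined first:
 * section and supersection mappings yield the physical address directly,
 * while small and large page mappings require a lookup in the L2 dtable.
 * Returns 0 if no valid mapping exists for "va".
 */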
3373240983Salcstatic vm_paddr_t
3374240983Salcpmap_extract_locked(pmap_t pmap, vm_offset_t va)
3375240983Salc{
3376239268Sgonzo	struct l2_dtable *l2;
3377239268Sgonzo	pd_entry_t l1pd;
3378239268Sgonzo	pt_entry_t *ptep, pte;
3379239268Sgonzo	vm_paddr_t pa;
3380239268Sgonzo	u_int l1idx;
3381240983Salc
3382266050Sian	if (kernel_vm_end != 0 && pmap != kernel_pmap)
3383240983Salc		PMAP_ASSERT_LOCKED(pmap);
3384239268Sgonzo	l1idx = L1_IDX(va);
3385240983Salc	l1pd = pmap->pm_l1->l1_kva[l1idx];
3386239268Sgonzo	if (l1pte_section_p(l1pd)) {
3387239268Sgonzo		/* XXX: what to do about the bits > 32 ? */
3388239268Sgonzo		if (l1pd & L1_S_SUPERSEC)
3389239268Sgonzo			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
3390239268Sgonzo		else
3391239268Sgonzo			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3392239268Sgonzo	} else {
3393239268Sgonzo		/*
3394239268Sgonzo		 * Note that we can't rely on the validity of the L1
3395239268Sgonzo		 * descriptor as an indication that a mapping exists.
3396239268Sgonzo		 * We have to look it up in the L2 dtable.
3397239268Sgonzo		 */
3398240983Salc		l2 = pmap->pm_l2[L2_IDX(l1idx)];
3399239268Sgonzo		if (l2 == NULL ||
3400240983Salc		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL)
3401239268Sgonzo			return (0);
3402240983Salc		pte = ptep[l2pte_index(va)];
3403240983Salc		if (pte == 0)
3404239268Sgonzo			return (0);
3405239268Sgonzo		switch (pte & L2_TYPE_MASK) {
3406239268Sgonzo		case L2_TYPE_L:
3407239268Sgonzo			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3408239268Sgonzo			break;
3409239268Sgonzo		default:
3410239268Sgonzo			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3411239268Sgonzo			break;
3412239268Sgonzo		}
3413239268Sgonzo	}
3414239268Sgonzo	return (pa);
3415239268Sgonzo}
3416239268Sgonzo
3417239268Sgonzo/*
3418239268Sgonzo * Atomically extract and hold the physical page with the given
3419239268Sgonzo * pmap and virtual address pair if that mapping permits the given
3420239268Sgonzo * protection.
3421239268Sgonzo *
3422239268Sgonzo */
3423239268Sgonzovm_page_t
3424239268Sgonzopmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3425239268Sgonzo{
3426239268Sgonzo	struct l2_dtable *l2;
3427239268Sgonzo	pd_entry_t l1pd;
3428239268Sgonzo	pt_entry_t *ptep, pte;
3429239268Sgonzo	vm_paddr_t pa, paddr;
3430239268Sgonzo	vm_page_t m = NULL;
3431239268Sgonzo	u_int l1idx;
3432239268Sgonzo	l1idx = L1_IDX(va);
3433239268Sgonzo	paddr = 0;
3434239268Sgonzo
3435239268Sgonzo	PMAP_LOCK(pmap);
3436239268Sgonzoretry:
3437239268Sgonzo	l1pd = pmap->pm_l1->l1_kva[l1idx];
3438239268Sgonzo	if (l1pte_section_p(l1pd)) {
3439239268Sgonzo		/* XXX: what to do about the bits > 32 ? */
3440239268Sgonzo		if (l1pd & L1_S_SUPERSEC)
3441239268Sgonzo			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
3442239268Sgonzo		else
3443239268Sgonzo			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3444239268Sgonzo		if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
3445239268Sgonzo			goto retry;
3446239268Sgonzo		if (L1_S_WRITABLE(l1pd) || (prot & VM_PROT_WRITE) == 0) {
3447239268Sgonzo			m = PHYS_TO_VM_PAGE(pa);
3448239268Sgonzo			vm_page_hold(m);
3449239268Sgonzo		}
3450239268Sgonzo	} else {
3451239268Sgonzo		/*
3452239268Sgonzo		 * Note that we can't rely on the validity of the L1
3453239268Sgonzo		 * descriptor as an indication that a mapping exists.
3454239268Sgonzo		 * We have to look it up in the L2 dtable.
3455239268Sgonzo		 */
3456239268Sgonzo		l2 = pmap->pm_l2[L2_IDX(l1idx)];
3457239268Sgonzo
3458239268Sgonzo		if (l2 == NULL ||
3459239268Sgonzo		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3460239268Sgonzo			PMAP_UNLOCK(pmap);
3461239268Sgonzo			return (NULL);
3462239268Sgonzo		}
3463239268Sgonzo
3464239268Sgonzo		ptep = &ptep[l2pte_index(va)];
3465239268Sgonzo		pte = *ptep;
3466239268Sgonzo
3467239268Sgonzo		if (pte == 0) {
3468239268Sgonzo			PMAP_UNLOCK(pmap);
3469239268Sgonzo			return (NULL);
3470239268Sgonzo		} else if ((prot & VM_PROT_WRITE) && (pte & L2_APX)) {
3471239268Sgonzo			PMAP_UNLOCK(pmap);
3472239268Sgonzo			return (NULL);
3473239268Sgonzo		} else {
3474239268Sgonzo			switch (pte & L2_TYPE_MASK) {
3475239268Sgonzo			case L2_TYPE_L:
3476239268Sgonzo				panic("extract and hold section mapping");
3477239268Sgonzo				break;
3478239268Sgonzo			default:
3479239268Sgonzo				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3480239268Sgonzo				break;
3481239268Sgonzo			}
3482239268Sgonzo			if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
3483239268Sgonzo				goto retry;
3484239268Sgonzo			m = PHYS_TO_VM_PAGE(pa);
3485239268Sgonzo			vm_page_hold(m);
3486239268Sgonzo		}
3487239268Sgonzo
3488239268Sgonzo	}
3489239268Sgonzo
3490239268Sgonzo	PMAP_UNLOCK(pmap);
3491239268Sgonzo	PA_UNLOCK_COND(paddr);
3492239268Sgonzo	return (m);
3493239268Sgonzo}
3494239268Sgonzo
3495239268Sgonzo/*
3496239268Sgonzo * Initialize a preallocated and zeroed pmap structure,
3497239268Sgonzo * such as one in a vmspace structure.
3498239268Sgonzo */
3499239268Sgonzo
3500239268Sgonzoint
3501239268Sgonzopmap_pinit(pmap_t pmap)
3502239268Sgonzo{
3503239268Sgonzo	PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
3504239268Sgonzo
3505239268Sgonzo	pmap_alloc_l1(pmap);
3506239268Sgonzo	bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
3507239268Sgonzo
3508239268Sgonzo	CPU_ZERO(&pmap->pm_active);
3509239268Sgonzo
3510250634Sgber	TAILQ_INIT(&pmap->pm_pvchunk);
3511239268Sgonzo	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
3512239268Sgonzo	pmap->pm_stats.resident_count = 1;
3513239268Sgonzo	if (vector_page < KERNBASE) {
3514239268Sgonzo		pmap_enter(pmap, vector_page,
3515270439Skib		    PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
3516270439Skib		    PMAP_ENTER_WIRED, 0);
3517239268Sgonzo	}
3518239268Sgonzo	return (1);
3519239268Sgonzo}
3520239268Sgonzo
3521239268Sgonzo
3522239268Sgonzo/***************************************************
3523254918Sraj * Superpage management routines.
3524254918Sraj ***************************************************/
3525254918Sraj
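/*
 * Remove and return the pv entry for the given (pmap, va) pair from the
 * specified pv list header, or NULL if no such entry exists.
 */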
3526254918Srajstatic PMAP_INLINE struct pv_entry *
3527254918Srajpmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3528254918Sraj{
3529254918Sraj	pv_entry_t pv;
3530254918Sraj
3531254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
3532254918Sraj
3533254918Sraj	pv = pmap_find_pv(pvh, pmap, va);
3534254918Sraj	if (pv != NULL)
3535254918Sraj		TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
3536254918Sraj
3537254918Sraj	return (pv);
3538254918Sraj}
3539254918Sraj
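/*
 * Remove the pv entry for the given (pmap, va) pair and free it.  The entry
 * is expected to exist.
 */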
3540254918Srajstatic void
3541254918Srajpmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3542254918Sraj{
3543254918Sraj	pv_entry_t pv;
3544254918Sraj
3545254918Sraj	pv = pmap_pvh_remove(pvh, pmap, va);
3546254918Sraj	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3547254918Sraj	pmap_free_pv_entry(pmap, pv);
3548254918Sraj}
3549254918Sraj
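/*
 * Conditionally create a pv entry for the 1MB section mapping of "pa" at
 * virtual address "va".  Returns TRUE on success and FALSE if the number of
 * pv entries has already reached the high-water mark or the allocation fails.
 */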
3550254918Srajstatic boolean_t
3551254918Srajpmap_pv_insert_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3552254918Sraj{
3553254918Sraj	struct md_page *pvh;
3554254918Sraj	pv_entry_t pv;
3555254918Sraj
3556254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
3557254918Sraj	if (pv_entry_count < pv_entry_high_water &&
3558254918Sraj	    (pv = pmap_get_pv_entry(pmap, TRUE)) != NULL) {
3559254918Sraj		pv->pv_va = va;
3560254918Sraj		pvh = pa_to_pvh(pa);
3561254918Sraj		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
3562254918Sraj		return (TRUE);
3563254918Sraj	} else
3564254918Sraj		return (FALSE);
3565254918Sraj}
3566254918Sraj
3567254918Sraj/*
3568254918Sraj * Create the pv entries for each of the pages within a superpage.
3569254918Sraj */
3570254918Srajstatic void
3571254918Srajpmap_pv_demote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3572254918Sraj{
3573254918Sraj	struct md_page *pvh;
3574254918Sraj	pv_entry_t pve, pv;
3575254918Sraj	vm_offset_t va_last;
3576254918Sraj	vm_page_t m;
3577254918Sraj
3578254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
3579254918Sraj	KASSERT((pa & L1_S_OFFSET) == 0,
3580254918Sraj	    ("pmap_pv_demote_section: pa is not 1mpage aligned"));
3581254918Sraj
3582254918Sraj	/*
3583254918Sraj	 * Transfer the 1mpage's pv entry for this mapping to the first
3584254918Sraj	 * page's pv list.
3585254918Sraj	 */
3586254918Sraj	pvh = pa_to_pvh(pa);
3587254918Sraj	va = trunc_1mpage(va);
3588254918Sraj	pv = pmap_pvh_remove(pvh, pmap, va);
3589254918Sraj	KASSERT(pv != NULL, ("pmap_pv_demote_section: pv not found"));
3590254918Sraj	m = PHYS_TO_VM_PAGE(pa);
3591254918Sraj	TAILQ_INSERT_HEAD(&m->md.pv_list, pv, pv_list);
3592254918Sraj	/* Instantiate the remaining pv entries. */
3593254918Sraj	va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
3594254918Sraj	do {
3595254918Sraj		m++;
3596254918Sraj		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3597254918Sraj		    ("pmap_pv_demote_section: page %p is not managed", m));
3598254918Sraj		va += PAGE_SIZE;
3599254918Sraj		pve = pmap_get_pv_entry(pmap, FALSE);
3600254918Sraj		pmap_enter_pv(m, pve, pmap, va, pv->pv_flags);
3601254918Sraj	} while (va < va_last);
3602254918Sraj}
3603254918Sraj
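/*
 * Replace the pv entries of the individual 4KB base pages with a single pv
 * entry for the whole 1MB section; the inverse of pmap_pv_demote_section().
 */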
3604254918Srajstatic void
3605254918Srajpmap_pv_promote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3606254918Sraj{
3607254918Sraj	struct md_page *pvh;
3608254918Sraj	pv_entry_t pv;
3609254918Sraj	vm_offset_t va_last;
3610254918Sraj	vm_page_t m;
3611254918Sraj
3612254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
3613254918Sraj	KASSERT((pa & L1_S_OFFSET) == 0,
3614254918Sraj	    ("pmap_pv_promote_section: pa is not 1mpage aligned"));
3615254918Sraj
3616254918Sraj	/*
3617254918Sraj	 * Transfer the first page's pv entry for this mapping to the
3618254918Sraj	 * 1mpage's pv list.  Aside from avoiding the cost of a call
3619254918Sraj	 * to pmap_get_pv_entry(), a transfer avoids the possibility that
3620254918Sraj	 * pmap_get_pv_entry() calls pmap_pv_reclaim() and that
3621254918Sraj	 * pmap_pv_reclaim() removes one of the mappings that is being promoted.
3622254918Sraj	 */
3623254918Sraj	m = PHYS_TO_VM_PAGE(pa);
3624254918Sraj	va = trunc_1mpage(va);
3625254918Sraj	pv = pmap_pvh_remove(&m->md, pmap, va);
3626254918Sraj	KASSERT(pv != NULL, ("pmap_pv_promote_section: pv not found"));
3627254918Sraj	pvh = pa_to_pvh(pa);
3628254918Sraj	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
3629254918Sraj	/* Free the remaining pv entries in the newly mapped section pages */
3630254918Sraj	va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
3631254918Sraj	do {
3632254918Sraj		m++;
3633254918Sraj		va += PAGE_SIZE;
3634254918Sraj		/*
3635254918Sraj		 * The flags don't matter here; the first pv entry carries
3636254918Sraj		 * sufficient information for all of the pages, so nothing is lost.
3637254918Sraj		 */
3638254918Sraj		pmap_pvh_free(&m->md, pmap, va);
3639254918Sraj	} while (va < va_last);
3640254918Sraj}
3641254918Sraj
3642254918Sraj/*
3643254918Sraj * Tries to create a 1MB page mapping.  Returns TRUE if successful and FALSE
3644254918Sraj * otherwise.  Fails if (1) the page is unmanaged, the pmap is the kernel pmap,
3645254918Sraj * or the va shares a section with the vectors page, (2) a mapping already exists
3646254918Sraj * at the given va, or (3) no pv entry can be allocated without reclaiming one.
3647254918Sraj */
3648254918Srajstatic boolean_t
3649254918Srajpmap_enter_section(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3650254918Sraj{
3651254918Sraj	pd_entry_t *pl1pd;
3652254918Sraj	vm_offset_t pa;
3653254918Sraj	struct l2_bucket *l2b;
3654254918Sraj
3655254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
3656254918Sraj	PMAP_ASSERT_LOCKED(pmap);
3657254918Sraj
3658254918Sraj	/* Skip kernel, vectors page and unmanaged mappings */
3659254918Sraj	if ((pmap == pmap_kernel()) || (L1_IDX(va) == L1_IDX(vector_page)) ||
3660254918Sraj	    ((m->oflags & VPO_UNMANAGED) != 0)) {
3661254918Sraj		CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
3662254918Sraj		    " in pmap %p", va, pmap);
3663254918Sraj		return (FALSE);
3664254918Sraj	}
3665254918Sraj	/*
3666254918Sraj	 * Check whether this is a valid section superpage entry or
3667254918Sraj	 * there is a l2_bucket associated with that L1 page directory.
3668254918Sraj	 */
3669254918Sraj	va = trunc_1mpage(va);
3670254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3671254918Sraj	l2b = pmap_get_l2_bucket(pmap, va);
3672254918Sraj	if ((*pl1pd & L1_S_PROTO) || (l2b != NULL)) {
3673254918Sraj		CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
3674254918Sraj		    " in pmap %p", va, pmap);
3675254918Sraj		return (FALSE);
3676254918Sraj	}
3677254918Sraj	pa = VM_PAGE_TO_PHYS(m);
3678254918Sraj	/*
3679254918Sraj	 * Abort this mapping if its PV entry could not be created.
3680254918Sraj	 */
3681254918Sraj	if (!pmap_pv_insert_section(pmap, va, VM_PAGE_TO_PHYS(m))) {
3682254918Sraj		CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
3683254918Sraj		    " in pmap %p", va, pmap);
3684254918Sraj		return (FALSE);
3685254918Sraj	}
3686254918Sraj	/*
3687254918Sraj	 * Increment counters.
3688254918Sraj	 */
3689254918Sraj	pmap->pm_stats.resident_count += L2_PTE_NUM_TOTAL;
3690254918Sraj	/*
3691254918Sraj	 * Regardless of the requested permissions, map the superpage read-only.
3692254918Sraj	 */
3693254918Sraj	prot &= ~VM_PROT_WRITE;
3694254918Sraj	/*
3695254918Sraj	 * Map the superpage.
3696254918Sraj	 */
3697254918Sraj	pmap_map_section(pmap, va, pa, prot, FALSE);
3698254918Sraj
3699254918Sraj	pmap_section_mappings++;
3700254918Sraj	CTR2(KTR_PMAP, "pmap_enter_section: success for va %#lx"
3701254918Sraj	    " in pmap %p", va, pmap);
3702254918Sraj	return (TRUE);
3703254918Sraj}
3704254918Sraj
3705254918Sraj/*
3706254918Sraj * pmap_remove_section: unmap a 1MB superpage mapping from a process
3707254918Sraj */
3708254918Srajstatic void
3709254918Srajpmap_remove_section(pmap_t pmap, vm_offset_t sva)
3710254918Sraj{
3711254918Sraj	struct md_page *pvh;
3712254918Sraj	struct l2_bucket *l2b;
3713254918Sraj	pd_entry_t *pl1pd, l1pd;
3714254918Sraj	vm_offset_t eva, va;
3715254918Sraj	vm_page_t m;
3716254918Sraj
3717254918Sraj	PMAP_ASSERT_LOCKED(pmap);
3718254918Sraj	if ((pmap == pmap_kernel()) || (L1_IDX(sva) == L1_IDX(vector_page)))
3719254918Sraj		return;
3720254918Sraj
3721254918Sraj	KASSERT((sva & L1_S_OFFSET) == 0,
3722254918Sraj	    ("pmap_remove_section: sva is not 1mpage aligned"));
3723254918Sraj
3724254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
3725254918Sraj	l1pd = *pl1pd;
3726254918Sraj
3727254918Sraj	m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
3728254918Sraj	KASSERT((m != NULL && ((m->oflags & VPO_UNMANAGED) == 0)),
3729254918Sraj	    ("pmap_remove_section: no corresponding vm_page or "
3730254918Sraj	    "page unmanaged"));
3731254918Sraj
3732254918Sraj	pmap->pm_stats.resident_count -= L2_PTE_NUM_TOTAL;
3733254918Sraj	pvh = pa_to_pvh(l1pd & L1_S_FRAME);
3734254918Sraj	pmap_pvh_free(pvh, pmap, sva);
3735254918Sraj	eva = L2_NEXT_BUCKET(sva);
3736254918Sraj	for (va = sva, m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
3737254918Sraj	    va < eva; va += PAGE_SIZE, m++) {
3738254918Sraj		/*
3739254918Sraj		 * Mark the base pages referenced but skip marking them dirty.
3740254918Sraj		 * If the superpage is writeable, all base pages were already
3741254918Sraj		 * marked dirty in pmap_fault_fixup() before promotion.  The
3742254918Sraj		 * reference bit, however, might not have been set for each
3743254918Sraj		 * base page if the superpage was created at once rather than
3744254918Sraj		 * as a result of promotion.
3745254918Sraj		 */
3746254918Sraj		if (L1_S_REFERENCED(l1pd))
3747254918Sraj			vm_page_aflag_set(m, PGA_REFERENCED);
3748254918Sraj		if (TAILQ_EMPTY(&m->md.pv_list) &&
3749254918Sraj		    TAILQ_EMPTY(&pvh->pv_list))
3750254918Sraj			vm_page_aflag_clear(m, PGA_WRITEABLE);
3751254918Sraj	}
3752254918Sraj
3753254918Sraj	l2b = pmap_get_l2_bucket(pmap, sva);
3754254918Sraj	if (l2b != NULL) {
3755254918Sraj		KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
3756254918Sraj		    ("pmap_remove_section: l2_bucket occupancy error"));
3757254918Sraj		pmap_free_l2_bucket(pmap, l2b, L2_PTE_NUM_TOTAL);
3758254918Sraj	}
3759266199Sian	/* Now invalidate L1 slot */
3760266199Sian	*pl1pd = 0;
3761266199Sian	PTE_SYNC(pl1pd);
3762266199Sian	if (L1_S_EXECUTABLE(l1pd))
3763266199Sian		cpu_tlb_flushID_SE(sva);
3764266199Sian	else
3765266199Sian		cpu_tlb_flushD_SE(sva);
3766266353Sian	cpu_cpwait();
3767254918Sraj}
3768254918Sraj
3769254918Sraj/*
3770254918Sraj * Tries to promote the 256 contiguous 4KB page mappings that are
3771254918Sraj * within a single l2_bucket to a single 1MB section mapping.
3772254918Sraj * For promotion to occur, two conditions must be met: (1) the 4KB page
3773254918Sraj * mappings must map aligned, contiguous physical memory and (2) the 4KB page
3774254918Sraj * mappings must have identical characteristics.
3775254918Sraj */
3776254918Srajstatic void
3777254918Srajpmap_promote_section(pmap_t pmap, vm_offset_t va)
3778254918Sraj{
3779254918Sraj	pt_entry_t *firstptep, firstpte, oldpte, pa, *pte;
3780254918Sraj	vm_page_t m, oldm;
3781254918Sraj	vm_offset_t first_va, old_va;
3782254918Sraj	struct l2_bucket *l2b = NULL;
3783254918Sraj	vm_prot_t prot;
3784254918Sraj	struct pv_entry *pve, *first_pve;
3785254918Sraj
3786254918Sraj	PMAP_ASSERT_LOCKED(pmap);
3787254918Sraj
3788254918Sraj	prot = VM_PROT_ALL;
3789254918Sraj	/*
3790254918Sraj	 * Skip promoting kernel pages.  This is justified by the following:
3791254918Sraj	 * 1. Kernel is already mapped using section mappings in each pmap
3792254918Sraj	 * 2. Managed mappings within the kernel are not to be promoted anyway
3793254918Sraj	 */
3794254918Sraj	if (pmap == pmap_kernel()) {
3795254918Sraj		pmap_section_p_failures++;
3796254918Sraj		CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
3797254918Sraj		    " in pmap %p", va, pmap);
3798254918Sraj		return;
3799254918Sraj	}
3800254918Sraj	/* Do not attempt to promote the vectors page */
3801254918Sraj	if (L1_IDX(va) == L1_IDX(vector_page)) {
3802254918Sraj		pmap_section_p_failures++;
3803254918Sraj		CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
3804254918Sraj		    " in pmap %p", va, pmap);
3805254918Sraj		return;
3806254918Sraj	}
3807254918Sraj	/*
3808254918Sraj	 * Examine the first PTE in the specified l2_bucket. Abort if this PTE
3809254918Sraj	 * is either invalid, unused, or does not map the first 4KB physical
3810254918Sraj	 * page within the 1MB page.
3811254918Sraj	 */
3812254918Sraj	first_va = trunc_1mpage(va);
3813254918Sraj	l2b = pmap_get_l2_bucket(pmap, first_va);
3814254918Sraj	KASSERT(l2b != NULL, ("pmap_promote_section: trying to promote "
3815254918Sraj	    "a nonexistent l2 bucket"));
3816254918Sraj	firstptep = &l2b->l2b_kva[0];
3817254918Sraj
3818254918Sraj	firstpte = *firstptep;
3819254918Sraj	if ((l2pte_pa(firstpte) & L1_S_OFFSET) != 0) {
3820254918Sraj		pmap_section_p_failures++;
3821254918Sraj		CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
3822254918Sraj		    " in pmap %p", va, pmap);
3823254918Sraj		return;
3824254918Sraj	}
3825254918Sraj
3826254918Sraj	if ((firstpte & (L2_S_PROTO | L2_S_REF)) != (L2_S_PROTO | L2_S_REF)) {
3827254918Sraj		pmap_section_p_failures++;
3828254918Sraj		CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
3829254918Sraj		    " in pmap %p", va, pmap);
3830254918Sraj		return;
3831254918Sraj	}
3832254918Sraj	/*
3833254918Sraj	 * ARM uses the pv_entry to mark a particular mapping WIRED, so don't
3834254918Sraj	 * promote unmanaged pages: without a corresponding pv_entry it is
3835254918Sraj	 * impossible to determine whether the page is wired or not.
3836254918Sraj	 */
3837254918Sraj	m = PHYS_TO_VM_PAGE(l2pte_pa(firstpte));
3838254918Sraj	if (m && ((m->oflags & VPO_UNMANAGED) != 0)) {
3839254918Sraj		pmap_section_p_failures++;
3840254918Sraj		CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
3841254918Sraj		    " in pmap %p", va, pmap);
3842254918Sraj		return;
3843254918Sraj	}
3844254918Sraj	first_pve = pmap_find_pv(&m->md, pmap, first_va);
3845254918Sraj	/*
3846254918Sraj	 * PTE is modified only on write due to modified bit
3847254918Sraj	 * emulation. If the entry is referenced and writable
3848254918Sraj	 * then it is modified and we don't clear write enable.
3849254918Sraj	 * Otherwise, writing is disabled in the PTE anyway and
3850254918Sraj	 * we just configure protections for the section mapping
3851254918Sraj	 * that is going to be created.
3852254918Sraj	 */
3853266199Sian	if ((first_pve->pv_flags & PVF_WRITE) != 0) {
3854266199Sian		if (!L2_S_WRITABLE(firstpte)) {
3855266199Sian			first_pve->pv_flags &= ~PVF_WRITE;
3856266199Sian			prot &= ~VM_PROT_WRITE;
3857266199Sian		}
3858266199Sian	} else
3859254918Sraj		prot &= ~VM_PROT_WRITE;
3860254918Sraj
3861254918Sraj	if (!L2_S_EXECUTABLE(firstpte))
3862254918Sraj		prot &= ~VM_PROT_EXECUTE;
3863254918Sraj
3864254918Sraj	/*
3865254918Sraj	 * Examine each of the other PTEs in the specified l2_bucket.
3866254918Sraj	 * Abort if this PTE maps an unexpected 4KB physical page or
3867254918Sraj	 * does not have identical characteristics to the first PTE.
3868254918Sraj	 */
3869254918Sraj	pa = l2pte_pa(firstpte) + ((L2_PTE_NUM_TOTAL - 1) * PAGE_SIZE);
3870254918Sraj	old_va = L2_NEXT_BUCKET(first_va) - PAGE_SIZE;
3871254918Sraj
3872254918Sraj	for (pte = (firstptep + L2_PTE_NUM_TOTAL - 1); pte > firstptep; pte--) {
3873254918Sraj		oldpte = *pte;
3874254918Sraj		if (l2pte_pa(oldpte) != pa) {
3875254918Sraj			pmap_section_p_failures++;
3876254918Sraj			CTR2(KTR_PMAP, "pmap_promote_section: failure for "
3877254918Sraj			    "va %#x in pmap %p", va, pmap);
3878254918Sraj			return;
3879254918Sraj		}
3880254918Sraj		if ((oldpte & L2_S_PROMOTE) != (firstpte & L2_S_PROMOTE)) {
3881254918Sraj			pmap_section_p_failures++;
3882254918Sraj			CTR2(KTR_PMAP, "pmap_promote_section: failure for "
3883254918Sraj			    "va %#x in pmap %p", va, pmap);
3884254918Sraj			return;
3885254918Sraj		}
3886254918Sraj		oldm = PHYS_TO_VM_PAGE(l2pte_pa(oldpte));
3887254918Sraj		if (oldm && ((oldm->oflags & VPO_UNMANAGED) != 0)) {
3888254918Sraj			pmap_section_p_failures++;
3889254918Sraj			CTR2(KTR_PMAP, "pmap_promote_section: failure for "
3890254918Sraj			    "va %#x in pmap %p", va, pmap);
3891254918Sraj			return;
3892254918Sraj		}
3893254918Sraj
3894254918Sraj		pve = pmap_find_pv(&oldm->md, pmap, old_va);
3895254918Sraj		if (pve == NULL) {
3896254918Sraj			pmap_section_p_failures++;
3897254918Sraj			CTR2(KTR_PMAP, "pmap_promote_section: failure for "
3898254918Sraj			    "va %#x old_va %#x - no pve", va, old_va);
3899254918Sraj			return;
3900254918Sraj		}
3901254918Sraj
3902254918Sraj		if (!L2_S_WRITABLE(oldpte) && (pve->pv_flags & PVF_WRITE))
3903254918Sraj			pve->pv_flags &= ~PVF_WRITE;
3904266199Sian		if (pve->pv_flags != first_pve->pv_flags) {
3905266199Sian			pmap_section_p_failures++;
3906266199Sian			CTR2(KTR_PMAP, "pmap_promote_section: failure for "
3907266199Sian			    "va %#x in pmap %p", va, pmap);
3908266199Sian			return;
3909266199Sian		}
3910254918Sraj
3911254918Sraj		old_va -= PAGE_SIZE;
3912254918Sraj		pa -= PAGE_SIZE;
3913254918Sraj	}
3914254918Sraj	/*
3915254918Sraj	 * Promote the pv entries.
3916254918Sraj	 */
3917254918Sraj	pmap_pv_promote_section(pmap, first_va, l2pte_pa(firstpte));
3918254918Sraj	/*
3919254918Sraj	 * Map the superpage.
3920254918Sraj	 */
3921254918Sraj	pmap_map_section(pmap, first_va, l2pte_pa(firstpte), prot, TRUE);
3922266199Sian	/*
3923266199Sian	 * Invalidate all possible TLB mappings for small
3924266199Sian	 * pages within the newly created superpage.
3925266199Sian	 * Rely on the first PTE's attributes since they
3926266199Sian	 * have to be consistent across all of the base pages
3927266199Sian	 * within the superpage.  If the page is not executable
3928266199Sian	 * it is at least referenced.
3929266199Sian	 * The fastest way to do that is to invalidate the whole
3930266199Sian	 * TLB at once instead of issuing 256 single-entry CP15
3931266199Sian	 * TLB invalidations.  TLBs usually hold only a few dozen
3932266199Sian	 * entries, so the loss of unrelated entries is an
3933266199Sian	 * acceptable price for the simpler, faster flush.
3934266199Sian	 */
3935266199Sian	if (L2_S_EXECUTABLE(firstpte))
3936266199Sian		cpu_tlb_flushID();
3937266199Sian	else
3938266199Sian		cpu_tlb_flushD();
3939266353Sian	cpu_cpwait();
3940266199Sian
3941254918Sraj	pmap_section_promotions++;
3942254918Sraj	CTR2(KTR_PMAP, "pmap_promote_section: success for va %#x"
3943254918Sraj	    " in pmap %p", first_va, pmap);
3944254918Sraj}
3945254918Sraj
3946254918Sraj/*
3947254918Sraj * Fills an l2_bucket with mappings to consecutive physical pages.
3948254918Sraj */
3949254918Srajstatic void
3950254918Srajpmap_fill_l2b(struct l2_bucket *l2b, pt_entry_t newpte)
3951254918Sraj{
3952254918Sraj	pt_entry_t *ptep;
3953254918Sraj	int i;
3954254918Sraj
3955254918Sraj	for (i = 0; i < L2_PTE_NUM_TOTAL; i++) {
3956254918Sraj		ptep = &l2b->l2b_kva[i];
3957254918Sraj		*ptep = newpte;
3958254918Sraj		PTE_SYNC(ptep);
3959254918Sraj
3960254918Sraj		newpte += PAGE_SIZE;
3961254918Sraj	}
3962254918Sraj
3963254918Sraj	l2b->l2b_occupancy = L2_PTE_NUM_TOTAL;
3964254918Sraj}
3965254918Sraj
3966254918Sraj/*
3967254918Sraj * Tries to demote a 1MB section mapping. If demotion fails, the
3968254918Sraj * 1MB section mapping is invalidated.
3969254918Sraj */
3970254918Srajstatic boolean_t
3971254918Srajpmap_demote_section(pmap_t pmap, vm_offset_t va)
3972254918Sraj{
3973254918Sraj	struct l2_bucket *l2b;
3974254918Sraj	struct pv_entry *l1pdpve;
3975254918Sraj	struct md_page *pvh;
3976266199Sian	pd_entry_t *pl1pd, l1pd, newl1pd;
3977254918Sraj	pt_entry_t *firstptep, newpte;
3978254918Sraj	vm_offset_t pa;
3979254918Sraj	vm_page_t m;
3980254918Sraj
3981254918Sraj	PMAP_ASSERT_LOCKED(pmap);
3982254918Sraj	/*
3983254918Sraj	 * According to the assumptions described in pmap_promote_section,
3984254918Sraj	 * the kernel is and always should be mapped using 1MB section
3985254918Sraj	 * mappings.  Moreover, managed kernel pages are never promoted.
3986254918Sraj	 */
3987254918Sraj	KASSERT(pmap != pmap_kernel() && L1_IDX(va) != L1_IDX(vector_page),
3988254918Sraj	    ("pmap_demote_section: forbidden section mapping"));
3989254918Sraj
3990254918Sraj	va = trunc_1mpage(va);
3991254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3992254918Sraj	l1pd = *pl1pd;
3993254918Sraj	KASSERT((l1pd & L1_TYPE_MASK) == L1_S_PROTO,
3994254918Sraj	    ("pmap_demote_section: not section or invalid section"));
3995254918Sraj
3996254918Sraj	pa = l1pd & L1_S_FRAME;
3997254918Sraj	m = PHYS_TO_VM_PAGE(pa);
3998254918Sraj	KASSERT((m != NULL && (m->oflags & VPO_UNMANAGED) == 0),
3999254918Sraj	    ("pmap_demote_section: no vm_page for selected superpage or "
4000254918Sraj	     "page unmanaged"));
4001254918Sraj
4002254918Sraj	pvh = pa_to_pvh(pa);
4003254918Sraj	l1pdpve = pmap_find_pv(pvh, pmap, va);
4004254918Sraj	KASSERT(l1pdpve != NULL, ("pmap_demote_section: no pv entry for "
4005254918Sraj	    "managed page"));
4006254918Sraj
4007254918Sraj	l2b = pmap_get_l2_bucket(pmap, va);
4008254918Sraj	if (l2b == NULL) {
4009254918Sraj		KASSERT((l1pdpve->pv_flags & PVF_WIRED) == 0,
4010254918Sraj		    ("pmap_demote_section: No l2_bucket for wired mapping"));
4011254918Sraj		/*
4012254918Sraj		 * Invalidate the 1MB section mapping and return
4013254918Sraj		 * "failure" if the mapping was never accessed or the
4014254918Sraj		 * allocation of the new l2_bucket fails.
4015254918Sraj		 */
4016254918Sraj		if (!L1_S_REFERENCED(l1pd) ||
4017254918Sraj		    (l2b = pmap_alloc_l2_bucket(pmap, va)) == NULL) {
4018254918Sraj			/* Unmap and invalidate superpage. */
4019254918Sraj			pmap_remove_section(pmap, trunc_1mpage(va));
4020254918Sraj			CTR2(KTR_PMAP, "pmap_demote_section: failure for "
4021254918Sraj			    "va %#x in pmap %p", va, pmap);
4022254918Sraj			return (FALSE);
4023254918Sraj		}
4024254918Sraj	}
4025254918Sraj
4026254918Sraj	/*
4027254918Sraj	 * Now we should have corresponding l2_bucket available.
4028254918Sraj	 * Let's process it to recreate 256 PTEs for each base page
4029254918Sraj	 * within superpage.
4030254918Sraj	 */
4031254918Sraj	newpte = pa | L1_S_DEMOTE(l1pd);
4032254918Sraj	if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
4033254918Sraj		newpte |= pte_l2_s_cache_mode;
4034254918Sraj
4035254918Sraj	/*
4036254918Sraj	 * If the l2_bucket is new, initialize it.
4037254918Sraj	 */
4038254918Sraj	if (l2b->l2b_occupancy == 0)
4039254918Sraj		pmap_fill_l2b(l2b, newpte);
4040254918Sraj	else {
4041254918Sraj		firstptep = &l2b->l2b_kva[0];
4042254918Sraj		KASSERT(l2pte_pa(*firstptep) == (pa),
4043254918Sraj		    ("pmap_demote_section: firstpte and newpte map different "
4044254918Sraj		     "physical addresses"));
4045254918Sraj		/*
4046254918Sraj		 * If the mapping has changed attributes, update the page table
4047254918Sraj		 * entries.
4048254918Sraj		 */
4049254918Sraj		if ((*firstptep & L2_S_PROMOTE) != (L1_S_DEMOTE(l1pd)))
4050254918Sraj			pmap_fill_l2b(l2b, newpte);
4051254918Sraj	}
4052254918Sraj	/* Demote PV entry */
4053254918Sraj	pmap_pv_demote_section(pmap, va, pa);
4054254918Sraj
4055254918Sraj	/* Now fix-up L1 */
4056266199Sian	newl1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
4057266199Sian	*pl1pd = newl1pd;
4058254918Sraj	PTE_SYNC(pl1pd);
4059266199Sian	/* Invalidate old TLB mapping */
4060266199Sian	if (L1_S_EXECUTABLE(l1pd))
4061266199Sian		cpu_tlb_flushID_SE(va);
4062266199Sian	else if (L1_S_REFERENCED(l1pd))
4063266199Sian		cpu_tlb_flushD_SE(va);
4064266353Sian	cpu_cpwait();
4065254918Sraj
4066254918Sraj	pmap_section_demotions++;
4067254918Sraj	CTR2(KTR_PMAP, "pmap_demote_section: success for va %#x"
4068254918Sraj	    " in pmap %p", va, pmap);
4069254918Sraj	return (TRUE);
4070254918Sraj}
4071254918Sraj
4072254918Sraj/***************************************************
4073239268Sgonzo * page management routines.
4074239268Sgonzo ***************************************************/
4075239268Sgonzo
4076250634Sgber/*
4077250634Sgber * We are in a serious low memory condition.  Resort to
4078250634Sgber * drastic measures to free some pages so we can allocate
4079250634Sgber * another pv entry chunk.
4080250634Sgber */
4081250634Sgberstatic vm_page_t
4082250634Sgberpmap_pv_reclaim(pmap_t locked_pmap)
4083250634Sgber{
4084250634Sgber	struct pch newtail;
4085250634Sgber	struct pv_chunk *pc;
4086250634Sgber	struct l2_bucket *l2b = NULL;
4087250634Sgber	pmap_t pmap;
4088254918Sraj	pd_entry_t *pl1pd;
4089250929Sgber	pt_entry_t *ptep;
4090250634Sgber	pv_entry_t pv;
4091250634Sgber	vm_offset_t va;
4092250634Sgber	vm_page_t free, m, m_pc;
4093250634Sgber	uint32_t inuse;
4094250634Sgber	int bit, field, freed, idx;
4095239268Sgonzo
4096250634Sgber	PMAP_ASSERT_LOCKED(locked_pmap);
4097250634Sgber	pmap = NULL;
4098250634Sgber	free = m_pc = NULL;
4099250634Sgber	TAILQ_INIT(&newtail);
4100250634Sgber	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
4101250634Sgber	    free == NULL)) {
4102250634Sgber		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
4103250634Sgber		if (pmap != pc->pc_pmap) {
4104250634Sgber			if (pmap != NULL) {
4105250634Sgber				cpu_tlb_flushID();
4106250634Sgber				cpu_cpwait();
4107250634Sgber				if (pmap != locked_pmap)
4108250634Sgber					PMAP_UNLOCK(pmap);
4109250634Sgber			}
4110250634Sgber			pmap = pc->pc_pmap;
4111250634Sgber			/* Avoid deadlock and lock recursion. */
4112250634Sgber			if (pmap > locked_pmap)
4113250634Sgber				PMAP_LOCK(pmap);
4114250634Sgber			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
4115250634Sgber				pmap = NULL;
4116250634Sgber				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
4117250634Sgber				continue;
4118250634Sgber			}
4119250634Sgber		}
4120250634Sgber
4121250634Sgber		/*
4122250634Sgber		 * Destroy every non-wired, 4 KB page mapping in the chunk.
4123250634Sgber		 */
4124250634Sgber		freed = 0;
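		/*
		 * Each set bit in pc_map marks a free entry, so the
		 * complement masked with pc_freemask yields the entries
		 * currently in use.
		 */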
4125250634Sgber		for (field = 0; field < _NPCM; field++) {
4126250634Sgber			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
4127250634Sgber			    inuse != 0; inuse &= ~(1UL << bit)) {
4128250634Sgber				bit = ffs(inuse) - 1;
4129250634Sgber				idx = field * sizeof(inuse) * NBBY + bit;
4130250634Sgber				pv = &pc->pc_pventry[idx];
4131254918Sraj				va = pv->pv_va;
4132254918Sraj
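				/*
				 * Skip 1MB section mappings; only 4 KB page
				 * mappings are reclaimed here.
				 */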
4133254918Sraj				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
4134254918Sraj				if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
4135254918Sraj					continue;
4136250634Sgber				if (pv->pv_flags & PVF_WIRED)
4137250634Sgber					continue;
4138250634Sgber
4139250634Sgber				l2b = pmap_get_l2_bucket(pmap, va);
4140250634Sgber				KASSERT(l2b != NULL, ("No l2 bucket"));
4141250929Sgber				ptep = &l2b->l2b_kva[l2pte_index(va)];
4142250929Sgber				m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
4143250634Sgber				KASSERT((vm_offset_t)m >= KERNBASE,
4144250634Sgber				    ("Trying to access non-existent page "
4145253052Semaste				     "va %x pte %x", va, *ptep));
4146250929Sgber				*ptep = 0;
4147250929Sgber				PTE_SYNC(ptep);
4148254531Sraj				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
4149254531Sraj				if (TAILQ_EMPTY(&m->md.pv_list))
4150254531Sraj					vm_page_aflag_clear(m, PGA_WRITEABLE);
4151250634Sgber				pc->pc_map[field] |= 1UL << bit;
4152250634Sgber				freed++;
4153250634Sgber			}
4154250634Sgber		}
4155250634Sgber
4156250634Sgber		if (freed == 0) {
4157250634Sgber			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
4158250634Sgber			continue;
4159250634Sgber		}
4160250634Sgber		/* Every freed mapping is for a 4 KB page. */
4161250634Sgber		pmap->pm_stats.resident_count -= freed;
4162250634Sgber		PV_STAT(pv_entry_frees += freed);
4163250634Sgber		PV_STAT(pv_entry_spare += freed);
4164250634Sgber		pv_entry_count -= freed;
4165250634Sgber		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4166250634Sgber		for (field = 0; field < _NPCM; field++)
4167250634Sgber			if (pc->pc_map[field] != pc_freemask[field]) {
4168250634Sgber				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
4169250634Sgber				    pc_list);
4170250634Sgber				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
4171250634Sgber
4172250634Sgber				/*
4173250634Sgber				 * One freed pv entry in locked_pmap is
4174250634Sgber				 * sufficient.
4175250634Sgber				 */
4176250634Sgber				if (pmap == locked_pmap)
4177250634Sgber					goto out;
4178250634Sgber				break;
4179250634Sgber			}
4180250634Sgber		if (field == _NPCM) {
4181250634Sgber			PV_STAT(pv_entry_spare -= _NPCPV);
4182250634Sgber			PV_STAT(pc_chunk_count--);
4183250634Sgber			PV_STAT(pc_chunk_frees++);
4184250634Sgber			/* Entire chunk is free; return it. */
4185250634Sgber			m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
4186250634Sgber			pmap_qremove((vm_offset_t)pc, 1);
4187250634Sgber			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
4188250634Sgber			break;
4189250634Sgber		}
4190250634Sgber	}
4191250634Sgberout:
4192250634Sgber	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
4193250634Sgber	if (pmap != NULL) {
4194250634Sgber		cpu_tlb_flushID();
4195250634Sgber		cpu_cpwait();
4196250634Sgber		if (pmap != locked_pmap)
4197250634Sgber			PMAP_UNLOCK(pmap);
4198250634Sgber	}
4199250634Sgber	return (m_pc);
4200250634Sgber}
4201250634Sgber
4202250634Sgber/*
4203250634Sgber * free the pv_entry back to the free list
4204250634Sgber */
4205239268Sgonzostatic void
4206250634Sgberpmap_free_pv_entry(pmap_t pmap, pv_entry_t pv)
4207239268Sgonzo{
4208250634Sgber	struct pv_chunk *pc;
4209250634Sgber	int bit, field, idx;
4210250634Sgber
4211250634Sgber	rw_assert(&pvh_global_lock, RA_WLOCKED);
4212250634Sgber	PMAP_ASSERT_LOCKED(pmap);
4213250634Sgber	PV_STAT(pv_entry_frees++);
4214250634Sgber	PV_STAT(pv_entry_spare++);
4215239268Sgonzo	pv_entry_count--;
4216250634Sgber	pc = pv_to_chunk(pv);
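	/*
	 * Convert the entry to a (field, bit) position and mark it free
	 * in the chunk's bitmap.
	 */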
4217250634Sgber	idx = pv - &pc->pc_pventry[0];
4218250634Sgber	field = idx / (sizeof(u_long) * NBBY);
4219250634Sgber	bit = idx % (sizeof(u_long) * NBBY);
4220250634Sgber	pc->pc_map[field] |= 1ul << bit;
4221250634Sgber	for (idx = 0; idx < _NPCM; idx++)
4222250634Sgber		if (pc->pc_map[idx] != pc_freemask[idx]) {
4223250634Sgber			/*
4224250634Sgber			 * 98% of the time, pc is already at the head of the
4225250634Sgber			 * list.  If it isn't already, move it to the head.
4226250634Sgber			 */
4227250634Sgber			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
4228250634Sgber			    pc)) {
4229250634Sgber				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4230250634Sgber				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
4231250634Sgber				    pc_list);
4232250634Sgber			}
4233250634Sgber			return;
4234250634Sgber		}
4235250634Sgber	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4236250634Sgber	pmap_free_pv_chunk(pc);
4237239268Sgonzo}
4238239268Sgonzo
4239250634Sgberstatic void
4240250634Sgberpmap_free_pv_chunk(struct pv_chunk *pc)
4241250634Sgber{
4242250634Sgber	vm_page_t m;
4243239268Sgonzo
4244250634Sgber	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
4245250634Sgber	PV_STAT(pv_entry_spare -= _NPCPV);
4246250634Sgber	PV_STAT(pc_chunk_count--);
4247250634Sgber	PV_STAT(pc_chunk_frees++);
4248250634Sgber	/* entire chunk is free, return it */
4249250634Sgber	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
4250250634Sgber	pmap_qremove((vm_offset_t)pc, 1);
4251250634Sgber	vm_page_unwire(m, 0);
4252250634Sgber	vm_page_free(m);
4253250634Sgber	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
4255250634Sgber}
4256250634Sgber
4257239268Sgonzostatic pv_entry_t
4258250634Sgberpmap_get_pv_entry(pmap_t pmap, boolean_t try)
4259239268Sgonzo{
4260250634Sgber	static const struct timeval printinterval = { 60, 0 };
4261250634Sgber	static struct timeval lastprint;
4262250634Sgber	struct pv_chunk *pc;
4263250634Sgber	pv_entry_t pv;
4264250634Sgber	vm_page_t m;
4265250634Sgber	int bit, field, idx;
4266239268Sgonzo
4267250634Sgber	rw_assert(&pvh_global_lock, RA_WLOCKED);
4268250634Sgber	PMAP_ASSERT_LOCKED(pmap);
4269250634Sgber	PV_STAT(pv_entry_allocs++);
4270239268Sgonzo	pv_entry_count++;
4271250634Sgber
4272239268Sgonzo	if (pv_entry_count > pv_entry_high_water)
4273250634Sgber		if (ratecheck(&lastprint, &printinterval))
4274250634Sgber			printf("%s: Approaching the limit on PV entries.\n",
4275250634Sgber			    __func__);
4276250634Sgberretry:
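	/* First try to take an entry from a chunk this pmap already owns. */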
4277250634Sgber	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
4278250634Sgber	if (pc != NULL) {
4279250634Sgber		for (field = 0; field < _NPCM; field++) {
4280250634Sgber			if (pc->pc_map[field]) {
4281250634Sgber				bit = ffs(pc->pc_map[field]) - 1;
4282250634Sgber				break;
4283250634Sgber			}
4284250634Sgber		}
4285250634Sgber		if (field < _NPCM) {
4286250634Sgber			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
4287250634Sgber			pv = &pc->pc_pventry[idx];
4288250634Sgber			pc->pc_map[field] &= ~(1ul << bit);
4289250634Sgber			/* If this was the last item, move it to tail */
4290250634Sgber			for (field = 0; field < _NPCM; field++)
4291250634Sgber				if (pc->pc_map[field] != 0) {
4292250634Sgber					PV_STAT(pv_entry_spare--);
4293250634Sgber					return (pv);	/* not full, return */
4294250634Sgber				}
4295250634Sgber			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4296250634Sgber			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
4297250634Sgber			PV_STAT(pv_entry_spare--);
4298250634Sgber			return (pv);
4299250634Sgber		}
4300250634Sgber	}
4301250634Sgber	/*
4302250634Sgber	 * Access to the ptelist "pv_vafree" is synchronized by the pvh
4303250634Sgber	 * global lock.  If "pv_vafree" is currently non-empty, it will
4304250634Sgber	 * remain non-empty until pmap_ptelist_alloc() completes.
4305250634Sgber	 */
4306250634Sgber	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
4307250634Sgber	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
4308250634Sgber		if (try) {
4309250634Sgber			pv_entry_count--;
4310250634Sgber			PV_STAT(pc_chunk_tryfail++);
4311250634Sgber			return (NULL);
4312250634Sgber		}
4313250634Sgber		m = pmap_pv_reclaim(pmap);
4314250634Sgber		if (m == NULL)
4315250634Sgber			goto retry;
4316250634Sgber	}
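	/*
	 * Map the newly allocated page at a KVA taken from pv_vafree and
	 * carve it into a fresh pv chunk, handing its first entry to the
	 * caller.
	 */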
4317250634Sgber	PV_STAT(pc_chunk_count++);
4318250634Sgber	PV_STAT(pc_chunk_allocs++);
4319250634Sgber	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
4320250634Sgber	pmap_qenter((vm_offset_t)pc, &m, 1);
4321250634Sgber	pc->pc_pmap = pmap;
4322250634Sgber	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
4323250634Sgber	for (field = 1; field < _NPCM; field++)
4324250634Sgber		pc->pc_map[field] = pc_freemask[field];
4325250634Sgber	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
4326250634Sgber	pv = &pc->pc_pventry[0];
4327250634Sgber	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
4328250634Sgber	PV_STAT(pv_entry_spare += _NPCPV - 1);
4329250634Sgber	return (pv);
4330239268Sgonzo}
4331239268Sgonzo
4332239268Sgonzo/*
4333239268Sgonzo *	Remove the given range of addresses from the specified map.
4334239268Sgonzo *
4335239268Sgonzo *	It is assumed that the start and end are properly
4336239268Sgonzo *	rounded to the page size.
4337239268Sgonzo */
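/*
 * Once this many single-entry TLB flushes have been done in one call,
 * give up and flush the entire TLB at the end instead.
 */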
4338239268Sgonzo#define	PMAP_REMOVE_CLEAN_LIST_SIZE	3
4339239268Sgonzovoid
4340250929Sgberpmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4341239268Sgonzo{
4342239268Sgonzo	struct l2_bucket *l2b;
4343239268Sgonzo	vm_offset_t next_bucket;
4344254918Sraj	pd_entry_t *pl1pd, l1pd;
4345239268Sgonzo	pt_entry_t *ptep;
4346239268Sgonzo	u_int total;
4347239268Sgonzo	u_int mappings, is_exec, is_refd;
4348239268Sgonzo	int flushall = 0;
4349239268Sgonzo
4351239268Sgonzo	/*
4352239268Sgonzo	 * we lock in the pmap => pv_head direction
4353239268Sgonzo	 */
4354239268Sgonzo
4355240321Salc	rw_wlock(&pvh_global_lock);
4356250929Sgber	PMAP_LOCK(pmap);
4357239268Sgonzo	total = 0;
4358239268Sgonzo	while (sva < eva) {
4359239268Sgonzo		/*
4360254918Sraj		 * Check for large page.
4361254918Sraj		 */
4362254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
4363254918Sraj		l1pd = *pl1pd;
4364254918Sraj		if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
4365254918Sraj			KASSERT((l1pd & L1_S_DOM_MASK) !=
4366254918Sraj			    L1_S_DOM(PMAP_DOMAIN_KERNEL), ("pmap_remove: "
4367254918Sraj			    "Trying to remove kernel section mapping"));
4368254918Sraj			/*
4369254918Sraj			 * Are we removing the entire large page?  If not,
4370254918Sraj			 * demote the mapping and fall through.
4371254918Sraj			 */
4372254918Sraj			if (sva + L1_S_SIZE == L2_NEXT_BUCKET(sva) &&
4373254918Sraj			    eva >= L2_NEXT_BUCKET(sva)) {
4374254918Sraj				pmap_remove_section(pmap, sva);
4375254918Sraj				sva = L2_NEXT_BUCKET(sva);
4376254918Sraj				continue;
4377254918Sraj			} else if (!pmap_demote_section(pmap, sva)) {
4378254918Sraj				/* The large page mapping was destroyed. */
4379254918Sraj				sva = L2_NEXT_BUCKET(sva);
4380254918Sraj				continue;
4381254918Sraj			}
4382254918Sraj		}
4383254918Sraj		/*
4384239268Sgonzo		 * Do one L2 bucket's worth at a time.
4385239268Sgonzo		 */
4386239268Sgonzo		next_bucket = L2_NEXT_BUCKET(sva);
4387239268Sgonzo		if (next_bucket > eva)
4388239268Sgonzo			next_bucket = eva;
4389239268Sgonzo
4390250929Sgber		l2b = pmap_get_l2_bucket(pmap, sva);
4391239268Sgonzo		if (l2b == NULL) {
4392239268Sgonzo			sva = next_bucket;
4393239268Sgonzo			continue;
4394239268Sgonzo		}
4395239268Sgonzo
4396239268Sgonzo		ptep = &l2b->l2b_kva[l2pte_index(sva)];
4397239268Sgonzo		mappings = 0;
4398239268Sgonzo
4399239268Sgonzo		while (sva < next_bucket) {
4400250929Sgber			struct vm_page *m;
4401239268Sgonzo			pt_entry_t pte;
4402239268Sgonzo			vm_paddr_t pa;
4403239268Sgonzo
4404239268Sgonzo			pte = *ptep;
4405239268Sgonzo
4406239268Sgonzo			if (pte == 0) {
4407239268Sgonzo				/*
4408239268Sgonzo				 * Nothing here, move along
4409239268Sgonzo				 */
4410239268Sgonzo				sva += PAGE_SIZE;
4411239268Sgonzo				ptep++;
4412239268Sgonzo				continue;
4413239268Sgonzo			}
4414239268Sgonzo
4415250929Sgber			pmap->pm_stats.resident_count--;
4416239268Sgonzo			pa = l2pte_pa(pte);
4417239268Sgonzo			is_exec = 0;
4418239268Sgonzo			is_refd = 1;
4419239268Sgonzo
4420239268Sgonzo			/*
4421239268Sgonzo			 * Update flags. In a number of circumstances,
4422239268Sgonzo			 * we could cluster a lot of these and do a
4423239268Sgonzo			 * number of sequential pages in one go.
4424239268Sgonzo			 */
4425250929Sgber			if ((m = PHYS_TO_VM_PAGE(pa)) != NULL) {
4426239268Sgonzo				struct pv_entry *pve;
4427239268Sgonzo
4428250929Sgber				pve = pmap_remove_pv(m, pmap, sva);
4429239268Sgonzo				if (pve) {
4430250930Sgber					is_exec = PTE_BEEN_EXECD(pte);
4431250930Sgber					is_refd = PTE_BEEN_REFD(pte);
4432250929Sgber					pmap_free_pv_entry(pmap, pve);
4433239268Sgonzo				}
4434239268Sgonzo			}
4435239268Sgonzo
4436266353Sian			*ptep = 0;
4437266353Sian			PTE_SYNC(ptep);
4438250929Sgber			if (pmap_is_current(pmap)) {
4439239268Sgonzo				total++;
4440239268Sgonzo				if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
4441239268Sgonzo					if (is_exec)
4442239268Sgonzo						cpu_tlb_flushID_SE(sva);
4443239268Sgonzo					else if (is_refd)
4444239268Sgonzo						cpu_tlb_flushD_SE(sva);
4445250929Sgber				} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE)
4446239268Sgonzo					flushall = 1;
4447239268Sgonzo			}
4448239268Sgonzo
4449239268Sgonzo			sva += PAGE_SIZE;
4450239268Sgonzo			ptep++;
4451239268Sgonzo			mappings++;
4452239268Sgonzo		}
4453239268Sgonzo
4454250929Sgber		pmap_free_l2_bucket(pmap, l2b, mappings);
4455239268Sgonzo	}
4456239268Sgonzo
4457240321Salc	rw_wunlock(&pvh_global_lock);
4458239268Sgonzo	if (flushall)
4459239268Sgonzo		cpu_tlb_flushID();
4460266353Sian	cpu_cpwait();
4461266353Sian
4462250929Sgber	PMAP_UNLOCK(pmap);
4463239268Sgonzo}
4464239268Sgonzo
4465239268Sgonzo/*
4466239268Sgonzo * pmap_zero_page()
4467239268Sgonzo *
4468239268Sgonzo * Zero a given physical page by mapping it at a page hook point.
4469239268Sgonzo * In doing the zero page op, the page being zeroed is mapped cacheable;
4470239268Sgonzo * on StrongARM, accesses to non-cached pages are non-burst, which makes
4471239268Sgonzo * writing _any_ bulk data very slow.
4472239268Sgonzo */
4473239268Sgonzostatic void
4474250929Sgberpmap_zero_page_gen(vm_page_t m, int off, int size)
4475239268Sgonzo{
4476266353Sian	struct czpages *czp;
4477239268Sgonzo
4478266353Sian	KASSERT(TAILQ_EMPTY(&m->md.pv_list),
4479266353Sian	    ("pmap_zero_page_gen: page has mappings"));
4480266353Sian
4481250929Sgber	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
4482239268Sgonzo
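	/*
	 * Pin to this CPU and use its private copy/zero page window; the
	 * per-CPU mutex serializes use of the window's PTEs.
	 */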
4483266353Sian	sched_pin();
4484266353Sian	czp = &cpu_czpages[PCPU_GET(cpuid)];
4485266353Sian	mtx_lock(&czp->lock);
4486266353Sian
4487239268Sgonzo	/*
4488266353Sian	 * Hook in the page, zero it.
4489239268Sgonzo	 */
4490266353Sian	*czp->dstptep = L2_S_PROTO | phys | pte_l2_s_cache_mode | L2_S_REF;
4491266353Sian	pmap_set_prot(czp->dstptep, VM_PROT_WRITE, 0);
4492266353Sian	PTE_SYNC(czp->dstptep);
4493266353Sian	cpu_tlb_flushD_SE(czp->dstva);
4494239268Sgonzo	cpu_cpwait();
4495266353Sian
4496239268Sgonzo	if (off || size != PAGE_SIZE)
4497266353Sian		bzero((void *)(czp->dstva + off), size);
4498239268Sgonzo	else
4499266353Sian		bzero_page(czp->dstva);
4500239268Sgonzo
4501245146Sgonzo	/*
4502266353Sian	 * Although aliasing is not possible, if we use temporary mappings with
4503266353Sian	 * memory that will later be mapped non-cached or with write-through
4504266353Sian	 * caches, we might end up overwriting it when wbinv_all is called.
4505266353Sian	 * So make sure the caches are clean after the operation.
4506245146Sgonzo	 */
4507266353Sian	cpu_idcache_wbinv_range(czp->dstva, size);
4508266353Sian	pmap_l2cache_wbinv_range(czp->dstva, phys, size);
4509245146Sgonzo
4510266353Sian	mtx_unlock(&czp->lock);
4511266353Sian	sched_unpin();
4512239268Sgonzo}
4513239268Sgonzo
4514239268Sgonzo/*
4515239268Sgonzo *	pmap_zero_page zeros the specified hardware page by mapping
4516239268Sgonzo *	the page into KVM and using bzero to clear its contents.
4517239268Sgonzo */
4518239268Sgonzovoid
4519239268Sgonzopmap_zero_page(vm_page_t m)
4520239268Sgonzo{
4521239268Sgonzo	pmap_zero_page_gen(m, 0, PAGE_SIZE);
4522239268Sgonzo}
4523239268Sgonzo
4524239268Sgonzo
4525239268Sgonzo/*
4526239268Sgonzo *	pmap_zero_page_area zeros the specified hardware page by mapping
4527239268Sgonzo *	the page into KVM and using bzero to clear its contents.
4528239268Sgonzo *
4529239268Sgonzo *	off and size may not cover an area beyond a single hardware page.
4530239268Sgonzo */
4531239268Sgonzovoid
4532239268Sgonzopmap_zero_page_area(vm_page_t m, int off, int size)
4533239268Sgonzo{
4534239268Sgonzo
4535239268Sgonzo	pmap_zero_page_gen(m, off, size);
4536239268Sgonzo}
4537239268Sgonzo
4538239268Sgonzo
4539239268Sgonzo/*
4540239268Sgonzo *	pmap_zero_page_idle zeros the specified hardware page by mapping
4541239268Sgonzo *	the page into KVM and using bzero to clear its contents.  This
4542239268Sgonzo *	is intended to be called from the vm_pagezero process only and
4543239268Sgonzo *	outside of Giant.
4544239268Sgonzo */
4545239268Sgonzovoid
4546239268Sgonzopmap_zero_page_idle(vm_page_t m)
4547239268Sgonzo{
4548239268Sgonzo
4549239268Sgonzo	pmap_zero_page(m);
4550239268Sgonzo}
4551239268Sgonzo
4552239268Sgonzo/*
4553239268Sgonzo *	pmap_copy_page copies the specified (machine independent)
4554239268Sgonzo *	page by mapping the page into virtual memory and using
4555239268Sgonzo *	bcopy to copy the page, one machine dependent page at a
4556239268Sgonzo *	time.
4557239268Sgonzo */
4558239268Sgonzo
4559239268Sgonzo/*
4560239268Sgonzo * pmap_copy_page()
4561239268Sgonzo *
4562239268Sgonzo * Copy one physical page into another, by mapping the pages into
4563239268Sgonzo * hook points. The same comment regarding cacheability as in
4564239268Sgonzo * pmap_zero_page also applies here.
4565239268Sgonzo */
4566239268Sgonzovoid
4567239268Sgonzopmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
4568239268Sgonzo{
4569266353Sian	struct czpages *czp;
4570266353Sian
4571266353Sian	sched_pin();
4572266353Sian	czp = &cpu_czpages[PCPU_GET(cpuid)];
4573266353Sian	mtx_lock(&czp->lock);
4574266353Sian
4575239268Sgonzo	/*
4576266353Sian	 * Map the pages into the page hook points, copy them, and purge the
4577266353Sian	 * cache for the appropriate page.
4578239268Sgonzo	 */
4579266353Sian	*czp->srcptep = L2_S_PROTO | src | pte_l2_s_cache_mode | L2_S_REF;
4580266353Sian	pmap_set_prot(czp->srcptep, VM_PROT_READ, 0);
4581266353Sian	PTE_SYNC(czp->srcptep);
4582266353Sian	cpu_tlb_flushD_SE(czp->srcva);
4583266353Sian	*czp->dstptep = L2_S_PROTO | dst | pte_l2_s_cache_mode | L2_S_REF;
4584266353Sian	pmap_set_prot(czp->dstptep, VM_PROT_READ | VM_PROT_WRITE, 0);
4585266353Sian	PTE_SYNC(czp->dstptep);
4586266353Sian	cpu_tlb_flushD_SE(czp->dstva);
4587266353Sian	cpu_cpwait();
4588239268Sgonzo
4589266353Sian	bcopy_page(czp->srcva, czp->dstva);
4590239268Sgonzo
4591245146Sgonzo	/*
4592266353Sian	 * Although aliasing is not possible, if we use temporary mappings with
4593266353Sian	 * memory that will later be mapped non-cached or with write-through
4594266353Sian	 * caches, we might end up overwriting it when wbinv_all is called.
4595266353Sian	 * So make sure the caches are clean after the operation.
4596245146Sgonzo	 */
4597266353Sian	cpu_idcache_wbinv_range(czp->dstva, PAGE_SIZE);
4598266353Sian	pmap_l2cache_wbinv_range(czp->dstva, dst, PAGE_SIZE);
4599239268Sgonzo
4600266353Sian	mtx_unlock(&czp->lock);
4601266353Sian	sched_unpin();
4602239268Sgonzo}
4603239268Sgonzo
4604248508Skibint unmapped_buf_allowed = 1;
4605248508Skib
4606239268Sgonzovoid
4607248280Skibpmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4608248280Skib    vm_offset_t b_offset, int xfersize)
4609248280Skib{
4610248280Skib	vm_page_t a_pg, b_pg;
4611248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
4612248280Skib	int cnt;
4613266353Sian	struct czpages *czp;
4614248280Skib
4615266353Sian	sched_pin();
4616266353Sian	czp = &cpu_czpages[PCPU_GET(cpuid)];
4617266353Sian	mtx_lock(&czp->lock);
4618266353Sian
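	/*
	 * Copy in runs that never cross a page boundary in either the
	 * source or the destination page array.
	 */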
4619248280Skib	while (xfersize > 0) {
4620248280Skib		a_pg = ma[a_offset >> PAGE_SHIFT];
4621248280Skib		a_pg_offset = a_offset & PAGE_MASK;
4622248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4623248280Skib		b_pg = mb[b_offset >> PAGE_SHIFT];
4624248280Skib		b_pg_offset = b_offset & PAGE_MASK;
4625248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4626266353Sian		*czp->srcptep = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
4627250928Sgber		    pte_l2_s_cache_mode | L2_S_REF;
4628266353Sian		pmap_set_prot(czp->srcptep, VM_PROT_READ, 0);
4629266353Sian		PTE_SYNC(czp->srcptep);
4630266353Sian		cpu_tlb_flushD_SE(czp->srcva);
4631266353Sian		*czp->dstptep = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
4632250928Sgber		    pte_l2_s_cache_mode | L2_S_REF;
4633266353Sian		pmap_set_prot(czp->dstptep, VM_PROT_READ | VM_PROT_WRITE, 0);
4634266353Sian		PTE_SYNC(czp->dstptep);
4635266353Sian		cpu_tlb_flushD_SE(czp->dstva);
4636248280Skib		cpu_cpwait();
4637266353Sian		bcopy((char *)czp->srcva + a_pg_offset,
4638248280Skib		    (char *)czp->dstva + b_pg_offset, cnt);
4639266353Sian		cpu_idcache_wbinv_range(czp->dstva + b_pg_offset, cnt);
4640266353Sian		pmap_l2cache_wbinv_range(czp->dstva + b_pg_offset,
4641248280Skib		    VM_PAGE_TO_PHYS(b_pg) + b_pg_offset, cnt);
4642248280Skib		xfersize -= cnt;
4643248280Skib		a_offset += cnt;
4644248280Skib		b_offset += cnt;
4645248280Skib	}
4646266353Sian
4647266353Sian	mtx_unlock(&czp->lock);
4648266353Sian	sched_unpin();
4649248280Skib}
4650248280Skib
4651248280Skibvoid
4652239268Sgonzopmap_copy_page(vm_page_t src, vm_page_t dst)
4653239268Sgonzo{
4654239268Sgonzo
4655239268Sgonzo	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
4656239268Sgonzo	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
4657239268Sgonzo	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
4658239268Sgonzo		return;
4659239268Sgonzo
4660239268Sgonzo	pmap_copy_page_generic(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
4661239268Sgonzo}
4662239268Sgonzo
4663239268Sgonzo/*
4664239268Sgonzo * Return TRUE if the given physical page is mapped in the specified
4665239268Sgonzo * pmap; only the first 16 PV entries are examined ("quick").
4666239268Sgonzo */
4667239268Sgonzoboolean_t
4668239268Sgonzopmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4669239268Sgonzo{
4670254918Sraj	struct md_page *pvh;
4671239268Sgonzo	pv_entry_t pv;
4672239268Sgonzo	int loops = 0;
4673239268Sgonzo	boolean_t rv;
4674239268Sgonzo
4675239268Sgonzo	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4676239268Sgonzo	    ("pmap_page_exists_quick: page %p is not managed", m));
4677239268Sgonzo	rv = FALSE;
4678240321Salc	rw_wlock(&pvh_global_lock);
4679239268Sgonzo	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4680250634Sgber		if (PV_PMAP(pv) == pmap) {
4681239268Sgonzo			rv = TRUE;
4682239268Sgonzo			break;
4683239268Sgonzo		}
4684239268Sgonzo		loops++;
4685239268Sgonzo		if (loops >= 16)
4686239268Sgonzo			break;
4687239268Sgonzo	}
4688254918Sraj	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4689254918Sraj		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4690254918Sraj		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4691254918Sraj			if (PV_PMAP(pv) == pmap) {
4692254918Sraj				rv = TRUE;
4693254918Sraj				break;
4694254918Sraj			}
4695254918Sraj			loops++;
4696254918Sraj			if (loops >= 16)
4697254918Sraj				break;
4698254918Sraj		}
4699254918Sraj	}
4700240321Salc	rw_wunlock(&pvh_global_lock);
4701239268Sgonzo	return (rv);
4702239268Sgonzo}
4703239268Sgonzo
4704239268Sgonzo/*
4705239268Sgonzo *	pmap_page_wired_mappings:
4706239268Sgonzo *
4707239268Sgonzo *	Return the number of managed mappings to the given physical page
4708239268Sgonzo *	that are wired.
4709239268Sgonzo */
4710239268Sgonzoint
4711239268Sgonzopmap_page_wired_mappings(vm_page_t m)
4712239268Sgonzo{
4713239268Sgonzo	int count;
4714239268Sgonzo
4715239268Sgonzo	count = 0;
4716254918Sraj	if ((m->oflags & VPO_UNMANAGED) != 0)
4717239268Sgonzo		return (count);
4718240321Salc	rw_wlock(&pvh_global_lock);
4719254918Sraj	count = pmap_pvh_wired_mappings(&m->md, count);
4720254918Sraj	if ((m->flags & PG_FICTITIOUS) == 0) {
4721254918Sraj	    count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
4722254918Sraj	        count);
4723254918Sraj	}
4724240321Salc	rw_wunlock(&pvh_global_lock);
4725239268Sgonzo	return (count);
4726239268Sgonzo}
4727239268Sgonzo
4728239268Sgonzo/*
4729254918Sraj *	pmap_pvh_wired_mappings:
4730239268Sgonzo *
4731254918Sraj *	Return the updated number "count" of managed mappings that are wired.
4732239268Sgonzo */
4733254918Srajstatic int
4734254918Srajpmap_pvh_wired_mappings(struct md_page *pvh, int count)
4735239268Sgonzo{
4736254918Sraj	pv_entry_t pv;
4737254918Sraj
4738254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
4739254918Sraj	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4740254918Sraj		if ((pv->pv_flags & PVF_WIRED) != 0)
4741254918Sraj			count++;
4742254918Sraj	}
4743254918Sraj	return (count);
4744254918Sraj}
4745254918Sraj
4746254918Sraj/*
4747254918Sraj * Returns TRUE if any of the given mappings were referenced and FALSE
4748254918Sraj * otherwise.  Both page and section mappings are supported.
4749254918Sraj */
4750254918Srajstatic boolean_t
4751254918Srajpmap_is_referenced_pvh(struct md_page *pvh)
4752254918Sraj{
4753250928Sgber	struct l2_bucket *l2b;
4754250928Sgber	pv_entry_t pv;
4755254918Sraj	pd_entry_t *pl1pd;
4756250929Sgber	pt_entry_t *ptep;
4757250928Sgber	pmap_t pmap;
4758250928Sgber	boolean_t rv;
4759239268Sgonzo
4760254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
4761250928Sgber	rv = FALSE;
4762254918Sraj	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4763250928Sgber		pmap = PV_PMAP(pv);
4764250928Sgber		PMAP_LOCK(pmap);
4765254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
4766254918Sraj		if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
4767254918Sraj			rv = L1_S_REFERENCED(*pl1pd);
4768254918Sraj		else {
4769254918Sraj			l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
4770254918Sraj			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
4771254918Sraj			rv = L2_S_REFERENCED(*ptep);
4772254918Sraj		}
4773250928Sgber		PMAP_UNLOCK(pmap);
4774250928Sgber		if (rv)
4775250928Sgber			break;
4776250928Sgber	}
4777254918Sraj	return (rv);
4778254918Sraj}
4779254918Sraj
4780254918Sraj/*
4781254918Sraj *	pmap_is_referenced:
4782254918Sraj *
4783254918Sraj *	Return whether or not the specified physical page was referenced
4784254918Sraj *	in any physical maps.
4785254918Sraj */
4786254918Srajboolean_t
4787254918Srajpmap_is_referenced(vm_page_t m)
4788254918Sraj{
4789254918Sraj	boolean_t rv;
4790254918Sraj
4791254918Sraj	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4792254918Sraj	    ("pmap_is_referenced: page %p is not managed", m));
4793254918Sraj	rw_wlock(&pvh_global_lock);
4794254918Sraj	rv = pmap_is_referenced_pvh(&m->md) ||
4795254918Sraj	    ((m->flags & PG_FICTITIOUS) == 0 &&
4796254918Sraj	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4797250928Sgber	rw_wunlock(&pvh_global_lock);
4798250928Sgber	return (rv);
4799239268Sgonzo}
4800239268Sgonzo
4801239268Sgonzo/*
4802239268Sgonzo *	pmap_ts_referenced:
4803239268Sgonzo *
4804239268Sgonzo *	Return the count of reference bits for a page, clearing all of them.
4805239268Sgonzo */
4806239268Sgonzoint
4807239268Sgonzopmap_ts_referenced(vm_page_t m)
4808239268Sgonzo{
4809239268Sgonzo
4810239268Sgonzo	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4811239268Sgonzo	    ("pmap_ts_referenced: page %p is not managed", m));
4812239268Sgonzo	return (pmap_clearbit(m, PVF_REF));
4813239268Sgonzo}
4814239268Sgonzo
4815254918Sraj/*
4816254918Sraj * Returns TRUE if any of the given mappings were used to modify
4817254918Sraj * physical memory. Otherwise, returns FALSE. Both page and 1MB section
4818254918Sraj * mappings are supported.
4819254918Sraj */
4820254918Srajstatic boolean_t
4821254918Srajpmap_is_modified_pvh(struct md_page *pvh)
4822239268Sgonzo{
4823254918Sraj	pd_entry_t *pl1pd;
4824250928Sgber	struct l2_bucket *l2b;
4825250928Sgber	pv_entry_t pv;
4826250929Sgber	pt_entry_t *ptep;
4827250928Sgber	pmap_t pmap;
4828250928Sgber	boolean_t rv;
4829239268Sgonzo
4830254918Sraj	rw_assert(&pvh_global_lock, RA_WLOCKED);
4831254918Sraj	rv = FALSE;
4832254918Sraj
4833254918Sraj	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4834254918Sraj		pmap = PV_PMAP(pv);
4835254918Sraj		PMAP_LOCK(pmap);
4836254918Sraj		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
4837254918Sraj		if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
4838254918Sraj			rv = L1_S_WRITABLE(*pl1pd);
4839254918Sraj		else {
4840254918Sraj			l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
4841254918Sraj			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
4842254918Sraj			rv = L2_S_WRITABLE(*ptep);
4843254918Sraj		}
4844254918Sraj		PMAP_UNLOCK(pmap);
4845254918Sraj		if (rv)
4846254918Sraj			break;
4847254918Sraj	}
4848254918Sraj
4849254918Sraj	return (rv);
4850254918Sraj}
4851254918Sraj
4852254918Srajboolean_t
4853254918Srajpmap_is_modified(vm_page_t m)
4854254918Sraj{
4855254918Sraj	boolean_t rv;
4856254918Sraj
4857239268Sgonzo	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4858239268Sgonzo	    ("pmap_is_modified: page %p is not managed", m));
4859250928Sgber	/*
4860254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4861250928Sgber	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
4862254918Sraj	 * is clear, no PTEs can have APX cleared.
4863250928Sgber	 */
4864250928Sgber	VM_OBJECT_ASSERT_WLOCKED(m->object);
4865254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4866254918Sraj		return (FALSE);
4867250928Sgber	rw_wlock(&pvh_global_lock);
4868254918Sraj	rv = pmap_is_modified_pvh(&m->md) ||
4869254918Sraj	    ((m->flags & PG_FICTITIOUS) == 0 &&
4870254918Sraj	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4871250928Sgber	rw_wunlock(&pvh_global_lock);
4872250928Sgber	return (rv);
4873239268Sgonzo}
4874239268Sgonzo
4875239268Sgonzo/*
4876255612Szbb *	Apply the given advice to the specified range of addresses within the
4877255612Szbb *	given pmap.  Depending on the advice, clear the referenced and/or
4878255612Szbb *	modified flags in each mapping.
4879255028Salc */
4880255028Salcvoid
4881255028Salcpmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4882255028Salc{
4883255612Szbb	struct l2_bucket *l2b;
4884255612Szbb	struct pv_entry *pve;
4885255612Szbb	pd_entry_t *pl1pd, l1pd;
4886255612Szbb	pt_entry_t *ptep, opte, pte;
4887255612Szbb	vm_offset_t next_bucket;
4888255612Szbb	vm_page_t m;
4889255612Szbb
4890255612Szbb	if (advice != MADV_DONTNEED && advice != MADV_FREE)
4891255612Szbb		return;
4892255612Szbb	rw_wlock(&pvh_global_lock);
4893255612Szbb	PMAP_LOCK(pmap);
4894255612Szbb	for (; sva < eva; sva = next_bucket) {
4895255612Szbb		next_bucket = L2_NEXT_BUCKET(sva);
4896255612Szbb		if (next_bucket < sva)
4897255612Szbb			next_bucket = eva;
4898255612Szbb		pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
4899255612Szbb		l1pd = *pl1pd;
4900255612Szbb		if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
4901255612Szbb			if (pmap == pmap_kernel())
4902255612Szbb				continue;
4903255612Szbb			if (!pmap_demote_section(pmap, sva)) {
4904255612Szbb				/*
4905255612Szbb				 * The large page mapping was destroyed.
4906255612Szbb				 */
4907255612Szbb				continue;
4908255612Szbb			}
4909255612Szbb			/*
4910255612Szbb			 * Unless the page mappings are wired, remove the
4911255612Szbb			 * mapping to a single page so that a subsequent
4912255612Szbb			 * access may repromote. Since the underlying
4913255612Szbb			 * l2_bucket is fully populated, this removal
4914255612Szbb			 * never frees an entire l2_bucket.
4915255612Szbb			 */
4916255612Szbb			l2b = pmap_get_l2_bucket(pmap, sva);
4917255612Szbb			KASSERT(l2b != NULL,
4918255612Szbb			    ("pmap_advise: no l2 bucket for "
4919255612Szbb			     "va %#x, pmap %p", sva, pmap));
4920255612Szbb			ptep = &l2b->l2b_kva[l2pte_index(sva)];
4921255612Szbb			opte = *ptep;
4922255612Szbb			m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
4923255612Szbb			KASSERT(m != NULL,
4924255612Szbb			    ("pmap_advise: no vm_page for demoted superpage"));
4925255612Szbb			pve = pmap_find_pv(&m->md, pmap, sva);
4926255612Szbb			KASSERT(pve != NULL,
4927255612Szbb			    ("pmap_advise: no PV entry for managed mapping"));
4928255612Szbb			if ((pve->pv_flags & PVF_WIRED) == 0) {
4929255612Szbb				pmap_free_l2_bucket(pmap, l2b, 1);
4930255612Szbb				pve = pmap_remove_pv(m, pmap, sva);
4931255612Szbb				pmap_free_pv_entry(pmap, pve);
4932255612Szbb				*ptep = 0;
4933255612Szbb				PTE_SYNC(ptep);
4934255612Szbb				if (pmap_is_current(pmap)) {
4935255612Szbb					if (PTE_BEEN_EXECD(opte))
4936255612Szbb						cpu_tlb_flushID_SE(sva);
4937255612Szbb					else if (PTE_BEEN_REFD(opte))
4938255612Szbb						cpu_tlb_flushD_SE(sva);
4939255612Szbb				}
4940255612Szbb			}
4941255612Szbb		}
4942255612Szbb		if (next_bucket > eva)
4943255612Szbb			next_bucket = eva;
4944255612Szbb		l2b = pmap_get_l2_bucket(pmap, sva);
4945255612Szbb		if (l2b == NULL)
4946255612Szbb			continue;
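		/*
		 * Walk the 4 KB mappings in this bucket.  Clear the
		 * referenced bit and, for MADV_FREE, also write-protect the
		 * mapping (set APX) so that a subsequent write refaults and
		 * marks the page dirty again.
		 */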
4947255612Szbb		for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
4948255612Szbb		    sva != next_bucket; ptep++, sva += PAGE_SIZE) {
4949255612Szbb			opte = pte = *ptep;
4950255612Szbb			if ((opte & L2_S_PROTO) == 0)
4951255612Szbb				continue;
4952255612Szbb			m = PHYS_TO_VM_PAGE(l2pte_pa(opte));
4953255612Szbb			if (m == NULL || (m->oflags & VPO_UNMANAGED) != 0)
4954255612Szbb				continue;
4955255612Szbb			else if (L2_S_WRITABLE(opte)) {
4956255612Szbb				if (advice == MADV_DONTNEED) {
4957255612Szbb					/*
4958255612Szbb					 * Don't need to mark the page
4959255612Szbb					 * dirty as it was already marked as
4960255612Szbb					 * such in pmap_fault_fixup() or
4961255612Szbb					 * pmap_enter_locked().
4962255612Szbb					 * Just clear the state.
4963255612Szbb					 */
4964255612Szbb				} else
4965255612Szbb					pte |= L2_APX;
4966255612Szbb
4967255612Szbb				pte &= ~L2_S_REF;
4968255612Szbb				*ptep = pte;
4969255612Szbb				PTE_SYNC(ptep);
4970255612Szbb			} else if (L2_S_REFERENCED(opte)) {
4971255612Szbb				pte &= ~L2_S_REF;
4972255612Szbb				*ptep = pte;
4973255612Szbb				PTE_SYNC(ptep);
4974255612Szbb			} else
4975255612Szbb				continue;
4976255612Szbb			if (pmap_is_current(pmap)) {
4977255612Szbb				if (PTE_BEEN_EXECD(opte))
4978255612Szbb					cpu_tlb_flushID_SE(sva);
4979255612Szbb				else if (PTE_BEEN_REFD(opte))
4980255612Szbb					cpu_tlb_flushD_SE(sva);
4981255612Szbb			}
4982255612Szbb		}
4983255612Szbb	}
4984266357Sian	cpu_cpwait();
4985255612Szbb	rw_wunlock(&pvh_global_lock);
4986255612Szbb	PMAP_UNLOCK(pmap);
4987255028Salc}
4988255028Salc
4989255028Salc/*
4990239268Sgonzo *	Clear the modify bits on the specified physical page.
4991239268Sgonzo */
4992239268Sgonzovoid
4993239268Sgonzopmap_clear_modify(vm_page_t m)
4994239268Sgonzo{
4995239268Sgonzo
4996239268Sgonzo	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4997239268Sgonzo	    ("pmap_clear_modify: page %p is not managed", m));
4998248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
4999254138Sattilio	KASSERT(!vm_page_xbusied(m),
5000254138Sattilio	    ("pmap_clear_modify: page %p is exclusive busied", m));
5001239268Sgonzo
5002239268Sgonzo	/*
5003239268Sgonzo	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
5004239268Sgonzo	 * If the object containing the page is locked and the page is not
5005254138Sattilio	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
5006239268Sgonzo	 */
5007239268Sgonzo	if ((m->aflags & PGA_WRITEABLE) == 0)
5008239268Sgonzo		return;
5009250928Sgber	if (pmap_is_modified(m))
5010239268Sgonzo		pmap_clearbit(m, PVF_MOD);
5011239268Sgonzo}
5012239268Sgonzo
5013239268Sgonzo
5014239268Sgonzo/*
5015239268Sgonzo * Clear the write and modified bits in each of the given page's mappings.
5016239268Sgonzo */
5017239268Sgonzovoid
5018239268Sgonzopmap_remove_write(vm_page_t m)
5019239268Sgonzo{
5020239268Sgonzo	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5021239268Sgonzo	    ("pmap_remove_write: page %p is not managed", m));
5022239268Sgonzo
5023239268Sgonzo	/*
5024254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5025254138Sattilio	 * set by another thread while the object is locked.  Thus,
5026254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
5027239268Sgonzo	 */
5028248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
5029254138Sattilio	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
5030239268Sgonzo		pmap_clearbit(m, PVF_WRITE);
5031239268Sgonzo}
5032239268Sgonzo
5033239268Sgonzo
5034239268Sgonzo/*
5035239268Sgonzo * perform the pmap work for mincore
5036239268Sgonzo */
5037239268Sgonzoint
5038239268Sgonzopmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5039239268Sgonzo{
5040241054Salc	struct l2_bucket *l2b;
5041254918Sraj	pd_entry_t *pl1pd, l1pd;
5042241054Salc	pt_entry_t *ptep, pte;
5043241054Salc	vm_paddr_t pa;
5044241054Salc	vm_page_t m;
5045241054Salc	int val;
5046241054Salc	boolean_t managed;
5047239268Sgonzo
5048241054Salc	PMAP_LOCK(pmap);
5049241054Salcretry:
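	/*
	 * A valid 1MB section mapping is reported as MINCORE_SUPER;
	 * otherwise the 4 KB PTE, if any, determines the result.
	 */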
5050254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(addr)];
5051254918Sraj	l1pd = *pl1pd;
5052254918Sraj	if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
5053254918Sraj		pa = (l1pd & L1_S_FRAME);
5054254918Sraj		val = MINCORE_SUPER | MINCORE_INCORE;
5055254918Sraj		if (L1_S_WRITABLE(l1pd))
5056254918Sraj			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5057254918Sraj		managed = FALSE;
5058254918Sraj		m = PHYS_TO_VM_PAGE(pa);
5059254918Sraj		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0)
5060254918Sraj			managed = TRUE;
5061254918Sraj		if (managed) {
5062254918Sraj			if (L1_S_REFERENCED(l1pd))
5063254918Sraj				val |= MINCORE_REFERENCED |
5064254918Sraj				    MINCORE_REFERENCED_OTHER;
5065254918Sraj		}
5066254918Sraj	} else {
5067254918Sraj		l2b = pmap_get_l2_bucket(pmap, addr);
5068254918Sraj		if (l2b == NULL) {
5069254918Sraj			val = 0;
5070254918Sraj			goto out;
5071254918Sraj		}
5072254918Sraj		ptep = &l2b->l2b_kva[l2pte_index(addr)];
5073254918Sraj		pte = *ptep;
5074254918Sraj		if (!l2pte_valid(pte)) {
5075254918Sraj			val = 0;
5076254918Sraj			goto out;
5077254918Sraj		}
5078254918Sraj		val = MINCORE_INCORE;
5079254918Sraj		if (L2_S_WRITABLE(pte))
5080254918Sraj			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5081254918Sraj		managed = FALSE;
5082254918Sraj		pa = l2pte_pa(pte);
5083254918Sraj		m = PHYS_TO_VM_PAGE(pa);
5084254918Sraj		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0)
5085254918Sraj			managed = TRUE;
5086254918Sraj		if (managed) {
5087254918Sraj			if (L2_S_REFERENCED(pte))
5088254918Sraj				val |= MINCORE_REFERENCED |
5089254918Sraj				    MINCORE_REFERENCED_OTHER;
5090254918Sraj		}
5091241054Salc	}
5092241054Salc	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5093241054Salc	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5094241054Salc		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
5095241054Salc		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
5096241054Salc			goto retry;
5097241054Salc	} else
5098241054Salcout:
5099241054Salc		PA_UNLOCK_COND(*locked_pa);
5100241054Salc	PMAP_UNLOCK(pmap);
5101241054Salc	return (val);
5102239268Sgonzo}
5103239268Sgonzo
5104239268Sgonzovoid
5105250929Sgberpmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
5106239268Sgonzo{
5107239268Sgonzo}
5108239268Sgonzo
5109239268Sgonzo/*
5110239268Sgonzo *	Increase the starting virtual address of the given mapping if a
5111239268Sgonzo *	different alignment might result in more superpage mappings.
5112239268Sgonzo */
5113239268Sgonzovoid
5114239268Sgonzopmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
5115239268Sgonzo    vm_offset_t *addr, vm_size_t size)
5116239268Sgonzo{
5117266412Sian	vm_offset_t superpage_offset;
5118266412Sian
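	/*
	 * Shift *addr so that its offset within a superpage matches the
	 * superpage offset of "offset", maximizing the chance that the
	 * resulting mappings can later be promoted to sections.
	 */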
5119266412Sian	if (size < NBPDR)
5120266412Sian		return;
5121266412Sian	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5122266412Sian		offset += ptoa(object->pg_color);
5123266412Sian	superpage_offset = offset & PDRMASK;
5124266412Sian	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
5125266412Sian	    (*addr & PDRMASK) == superpage_offset)
5126266412Sian		return;
5127266412Sian	if ((*addr & PDRMASK) < superpage_offset)
5128266412Sian		*addr = (*addr & ~PDRMASK) + superpage_offset;
5129266412Sian	else
5130266412Sian		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
5131239268Sgonzo}
5132239268Sgonzo
5133239268Sgonzo/*
5134239268Sgonzo * pmap_map_section:
5135239268Sgonzo *
5136239268Sgonzo *	Create a single section mapping.
5137239268Sgonzo */
5138239268Sgonzovoid
5139254918Srajpmap_map_section(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
5140254918Sraj    boolean_t ref)
5141239268Sgonzo{
5142254918Sraj	pd_entry_t *pl1pd, l1pd;
5143239268Sgonzo	pd_entry_t fl;
5144239268Sgonzo
5145254918Sraj	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
5146254918Sraj	    ("Not a valid section mapping"));
5147239268Sgonzo
5148254918Sraj	fl = pte_l1_s_cache_mode;
5149239268Sgonzo
5150254918Sraj	pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
5151254918Sraj	l1pd = L1_S_PROTO | pa | L1_S_PROT(PTE_USER, prot) | fl |
5152254918Sraj	    L1_S_DOM(pmap->pm_domain);
5153254918Sraj
5154254918Sraj	/* Mark page referenced if this section is a result of a promotion. */
5155254918Sraj	if (ref == TRUE)
5156254918Sraj		l1pd |= L1_S_REF;
5157254918Sraj#ifdef SMP
5158254918Sraj	l1pd |= L1_SHARED;
5159254918Sraj#endif
5160254918Sraj	*pl1pd = l1pd;
5161254918Sraj	PTE_SYNC(pl1pd);
5162239268Sgonzo}
5163239268Sgonzo
5164239268Sgonzo/*
5165239268Sgonzo * pmap_link_l2pt:
5166239268Sgonzo *
5167239268Sgonzo *	Link the L2 page table specified by l2pv.pv_pa into the L1
5168239268Sgonzo *	page table at the slot for "va".
5169239268Sgonzo */
5170239268Sgonzovoid
5171239268Sgonzopmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
5172239268Sgonzo{
5173239268Sgonzo	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
5174239268Sgonzo	u_int slot = va >> L1_S_SHIFT;
5175239268Sgonzo
5176239268Sgonzo	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
5177239268Sgonzo
5178239268Sgonzo#ifdef VERBOSE_INIT_ARM
5179239268Sgonzo	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
5180239268Sgonzo#endif
5181239268Sgonzo
5182239268Sgonzo	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
5183239268Sgonzo	PTE_SYNC(&pde[slot]);
5184239268Sgonzo
5185239268Sgonzo	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
5186239268Sgonzo
5187239268Sgonzo}
5188239268Sgonzo
5189239268Sgonzo/*
5190239268Sgonzo * pmap_map_entry
5191239268Sgonzo *
5192239268Sgonzo *	Create a single page mapping.
5193239268Sgonzo */
5194239268Sgonzovoid
5195239268Sgonzopmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
5196239268Sgonzo    int cache)
5197239268Sgonzo{
5198239268Sgonzo	pd_entry_t *pde = (pd_entry_t *) l1pt;
5199239268Sgonzo	pt_entry_t fl;
5200250929Sgber	pt_entry_t *ptep;
5201239268Sgonzo
5202239268Sgonzo	KASSERT(((va | pa) & PAGE_MASK) == 0,
	    ("pmap_map_entry: misaligned va or pa"));
5203239268Sgonzo
5204239268Sgonzo	fl = l2s_mem_types[cache];
5205239268Sgonzo
5206239268Sgonzo	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5207239268Sgonzo		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
5208239268Sgonzo
5209250929Sgber	ptep = (pt_entry_t *)kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
5210239268Sgonzo
5211250929Sgber	if (ptep == NULL)
5212239268Sgonzo		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
5213239268Sgonzo
5214250929Sgber	ptep[l2pte_index(va)] = L2_S_PROTO | pa | fl | L2_S_REF;
5215250929Sgber	pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
5216250929Sgber	PTE_SYNC(&ptep[l2pte_index(va)]);
5217239268Sgonzo}
5218239268Sgonzo
5219239268Sgonzo/*
5220239268Sgonzo * pmap_map_chunk:
5221239268Sgonzo *
5222239268Sgonzo *	Map a chunk of memory using the most efficient mappings
5223239268Sgonzo *	possible (section, large page, small page) into the
5224239268Sgonzo *	provided L1 and L2 tables at the specified virtual address.
5225239268Sgonzo */
5226239268Sgonzovm_size_t
5227239268Sgonzopmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
5228239268Sgonzo    vm_size_t size, int prot, int type)
5229239268Sgonzo{
5230239268Sgonzo	pd_entry_t *pde = (pd_entry_t *) l1pt;
5231250929Sgber	pt_entry_t *ptep, f1, f2s, f2l;
5232239268Sgonzo	vm_size_t resid;
5233239268Sgonzo	int i;
5234239268Sgonzo
5235239268Sgonzo	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5236239268Sgonzo
5237239268Sgonzo	if (l1pt == 0)
5238239268Sgonzo		panic("pmap_map_chunk: no L1 table provided");
5239239268Sgonzo
5240239268Sgonzo#ifdef VERBOSE_INIT_ARM
5241239268Sgonzo	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
5242239268Sgonzo	    "prot=0x%x type=%d\n", pa, va, size, resid, prot, type);
5243239268Sgonzo#endif
5244239268Sgonzo
5245239268Sgonzo	f1 = l1_mem_types[type];
5246239268Sgonzo	f2l = l2l_mem_types[type];
5247239268Sgonzo	f2s = l2s_mem_types[type];
5248239268Sgonzo
5249239268Sgonzo	size = resid;
5250239268Sgonzo
5251239268Sgonzo	while (resid > 0) {
5252239268Sgonzo		/* See if we can use a section mapping. */
5253239268Sgonzo		if (L1_S_MAPPABLE_P(va, pa, resid)) {
5254239268Sgonzo#ifdef VERBOSE_INIT_ARM
5255239268Sgonzo			printf("S");
5256239268Sgonzo#endif
5257239268Sgonzo			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
5258254918Sraj			    L1_S_PROT(PTE_KERNEL, prot | VM_PROT_EXECUTE) |
5259254918Sraj			    f1 | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_REF;
5260239268Sgonzo			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
5261239268Sgonzo			va += L1_S_SIZE;
5262239268Sgonzo			pa += L1_S_SIZE;
5263239268Sgonzo			resid -= L1_S_SIZE;
5264239268Sgonzo			continue;
5265239268Sgonzo		}
5266239268Sgonzo
5267239268Sgonzo		/*
5268239268Sgonzo		 * Ok, we're going to use an L2 table.  Make sure
5269239268Sgonzo		 * one is actually in the corresponding L1 slot
5270239268Sgonzo		 * for the current VA.
5271239268Sgonzo		 */
5272239268Sgonzo		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5273239268Sgonzo			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
5274239268Sgonzo
5275250929Sgber		ptep = (pt_entry_t *) kernel_pt_lookup(
5276239268Sgonzo		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
5277250929Sgber		if (ptep == NULL)
5278239268Sgonzo			panic("pmap_map_chunk: no L2 table for VA "
5279239268Sgonzo			    "0x%08x", va);
5280239268Sgonzo		/* See if we can use a L2 large page mapping. */
5281239268Sgonzo		if (L2_L_MAPPABLE_P(va, pa, resid)) {
5282239268Sgonzo#ifdef VERBOSE_INIT_ARM
5283239268Sgonzo			printf("L");
5284239268Sgonzo#endif
5285239268Sgonzo			for (i = 0; i < 16; i++) {
5286250929Sgber				ptep[l2pte_index(va) + i] =
5287239268Sgonzo				    L2_L_PROTO | pa |
5288239268Sgonzo				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
5289250929Sgber				PTE_SYNC(&ptep[l2pte_index(va) + i]);
5290239268Sgonzo			}
5291239268Sgonzo			va += L2_L_SIZE;
5292239268Sgonzo			pa += L2_L_SIZE;
5293239268Sgonzo			resid -= L2_L_SIZE;
5294239268Sgonzo			continue;
5295239268Sgonzo		}
5296239268Sgonzo
5297239268Sgonzo		/* Use a small page mapping. */
5298239268Sgonzo#ifdef VERBOSE_INIT_ARM
5299239268Sgonzo		printf("P");
5300239268Sgonzo#endif
5301250929Sgber		ptep[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
5302250929Sgber		pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
5303250929Sgber		PTE_SYNC(&ptep[l2pte_index(va)]);
5304239268Sgonzo		va += PAGE_SIZE;
5305239268Sgonzo		pa += PAGE_SIZE;
5306239268Sgonzo		resid -= PAGE_SIZE;
5307239268Sgonzo	}
5308239268Sgonzo#ifdef VERBOSE_INIT_ARM
5309239268Sgonzo	printf("\n");
5310239268Sgonzo#endif
5311239268Sgonzo	return (size);
5312239268Sgonzo
5313239268Sgonzo}
5314239268Sgonzo
5315239268Sgonzoint
5316239268Sgonzopmap_dmap_iscurrent(pmap_t pmap)
5317239268Sgonzo{
5318239268Sgonzo	return(pmap_is_current(pmap));
5319239268Sgonzo}
5320239268Sgonzo
5321244414Scognetvoid
5322244414Scognetpmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5323244414Scognet{
5324244414Scognet	/*
5325244414Scognet	 * Remember the memattr in a field that gets used to set the appropriate
5326244414Scognet	 * bits in the PTEs as mappings are established.
5327244414Scognet	 */
5328244414Scognet	m->md.pv_memattr = ma;
5329244414Scognet
5330244414Scognet	/*
5331244414Scognet	 * It appears that this function can only be called before any mappings
5332244414Scognet	 * for the page are established on ARM.  If this ever changes, this code
5333244414Scognet	 * will need to walk the pv_list and make each of the existing mappings
5334244414Scognet	 * uncacheable, being careful to sync caches and PTEs (and maybe
5335244414Scognet	 * invalidate TLB?) for any current mapping it modifies.
5336244414Scognet	 */
5337254536Sraj	if (TAILQ_FIRST(&m->md.pv_list) != NULL)
5338244414Scognet		panic("Can't change memattr on page with existing mappings");
5339244414Scognet}
5340