pmap-v6.c revision 338484
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1994 David Greenman
5 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
6 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org>
7 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org>
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * the Systems Programming Group of the University of Utah Computer
12 * Science Department and William Jolitz of UUNET Technologies Inc.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
39 */
40/*-
41 * Copyright (c) 2003 Networks Associates Technology, Inc.
42 * All rights reserved.
43 *
44 * This software was developed for the FreeBSD Project by Jake Burkholder,
45 * Safeport Network Services, and Network Associates Laboratories, the
46 * Security Research Division of Network Associates, Inc. under
47 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
48 * CHATS research program.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 *    notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 *    notice, this list of conditions and the following disclaimer in the
57 *    documentation and/or other materials provided with the distribution.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 */
71
72#include <sys/cdefs.h>
73__FBSDID("$FreeBSD: stable/11/sys/arm/arm/pmap-v6.c 338484 2018-09-05 21:28:33Z kib $");
74
75/*
76 *	Manages physical address maps.
77 *
78 *	Since the information managed by this module is
79 *	also stored by the logical address mapping module,
80 *	this module may throw away valid virtual-to-physical
81 *	mappings at almost any time.  However, invalidations
82 *	of virtual-to-physical mappings must be done as
83 *	requested.
84 *
85 *	In order to cope with hardware architectures which
86 *	make virtual-to-physical map invalidates expensive,
87 *	this module may delay invalidate or reduced protection
88 *	operations until such time as they are actually
89 *	necessary.  This module is given full information as
90 *	to which processors are currently using which maps,
91 *	and to when physical maps must be made correct.
92 */
93
94#include "opt_vm.h"
95#include "opt_pmap.h"
96#include "opt_ddb.h"
97
98#include <sys/param.h>
99#include <sys/systm.h>
100#include <sys/kernel.h>
101#include <sys/ktr.h>
102#include <sys/lock.h>
103#include <sys/proc.h>
104#include <sys/rwlock.h>
105#include <sys/malloc.h>
106#include <sys/vmmeter.h>
108#include <sys/mman.h>
109#include <sys/sf_buf.h>
110#include <sys/smp.h>
111#include <sys/sched.h>
112#include <sys/sysctl.h>
113
114#ifdef DDB
115#include <ddb/ddb.h>
116#endif
117
118#include <machine/physmem.h>
119
120#include <vm/vm.h>
121#include <vm/uma.h>
122#include <vm/pmap.h>
123#include <vm/vm_param.h>
124#include <vm/vm_kern.h>
125#include <vm/vm_object.h>
126#include <vm/vm_map.h>
127#include <vm/vm_page.h>
128#include <vm/vm_pageout.h>
129#include <vm/vm_phys.h>
130#include <vm/vm_extern.h>
131#include <vm/vm_reserv.h>
133#include <sys/mutex.h>
134
135#include <machine/md_var.h>
136#include <machine/pmap_var.h>
137#include <machine/cpu.h>
138#include <machine/pcb.h>
139#include <machine/sf_buf.h>
140#ifdef SMP
141#include <machine/smp.h>
142#endif
143
144#ifndef PMAP_SHPGPERPROC
145#define PMAP_SHPGPERPROC 200
146#endif
147
148#ifndef DIAGNOSTIC
149#define PMAP_INLINE	__inline
150#else
151#define PMAP_INLINE
152#endif
153
154#ifdef PMAP_DEBUG
155static void pmap_zero_page_check(vm_page_t m);
156void pmap_debug(int level);
157int pmap_pid_dump(int pid);
158
159#define PDEBUG(_lev_,_stat_) \
160	if (pmap_debug_level >= (_lev_)) \
161		((_stat_))
162#define dprintf printf
163int pmap_debug_level = 1;
164#else   /* PMAP_DEBUG */
165#define PDEBUG(_lev_,_stat_) /* Nothing */
166#define dprintf(x, arg...)
167#endif  /* PMAP_DEBUG */
168
169/*
170 *  Level 2 page table map definition ('max' is excluded).
171 */
172
173#define PT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
174#define PT2V_MAX_ADDRESS	((vm_offset_t)PT2MAP + PT2MAP_SIZE)
175
176#define UPT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
177#define UPT2V_MAX_ADDRESS \
178    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))
179
180/*
181 *  Promotion to a 1MB (PTE1) page mapping requires that the corresponding
182 *  4KB (PTE2) page mappings have identical settings for the following fields:
183 */
184#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG |	\
185			 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W |		\
186			 PTE2_ATTR_MASK)
187
188#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG |	\
189			 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W |		\
190			 PTE1_ATTR_MASK)
191
192#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
193				 (((l2_attr) & L2_C)    ? L1_S_C    : 0) | \
194				 (((l2_attr) & L2_B)    ? L1_S_B    : 0) | \
195				 (((l2_attr) & PTE2_A)  ? PTE1_A    : 0) | \
196				 (((l2_attr) & PTE2_NM) ? PTE1_NM   : 0) | \
197				 (((l2_attr) & PTE2_S)  ? PTE1_S    : 0) | \
198				 (((l2_attr) & PTE2_NG) ? PTE1_NG   : 0) | \
199				 (((l2_attr) & PTE2_NX) ? PTE1_NX   : 0) | \
200				 (((l2_attr) & PTE2_RO) ? PTE1_RO   : 0) | \
201				 (((l2_attr) & PTE2_U)  ? PTE1_U    : 0) | \
202				 (((l2_attr) & PTE2_W)  ? PTE1_W    : 0))
203
204#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
205				 (((l1_attr) & L1_S_C)    ? L2_C    : 0) | \
206				 (((l1_attr) & L1_S_B)    ? L2_B    : 0) | \
207				 (((l1_attr) & PTE1_A)    ? PTE2_A  : 0) | \
208				 (((l1_attr) & PTE1_NM)   ? PTE2_NM : 0) | \
209				 (((l1_attr) & PTE1_S)    ? PTE2_S  : 0) | \
210				 (((l1_attr) & PTE1_NG)   ? PTE2_NG : 0) | \
211				 (((l1_attr) & PTE1_NX)   ? PTE2_NX : 0) | \
212				 (((l1_attr) & PTE1_RO)   ? PTE2_RO : 0) | \
213				 (((l1_attr) & PTE1_U)    ? PTE2_U  : 0) | \
214				 (((l1_attr) & PTE1_W)    ? PTE2_W  : 0))
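/*
 *  Illustrative note for the two conversion macros above: for the bits they
 *  handle, ATTR_TO_L1() and ATTR_TO_L2() are inverse, so a hypothetical
 *  sanity check (not present in the code) would be expected to hold:
 *
 *	uint32_t l2_bits = PTE2_A | PTE2_S | PTE2_RO;
 *	KASSERT(ATTR_TO_L2(ATTR_TO_L1(l2_bits)) == l2_bits,
 *	    ("attr round-trip"));
 */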
215
216/*
217 *  PTE2 descriptors creation macros.
218 */
219#define PTE2_ATTR_DEFAULT	vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
220#define PTE2_ATTR_PT		vm_memattr_to_pte2(pt_memattr)
221
222#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
223#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
224
225#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
226#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)
227
228#define PV_STATS
229#ifdef PV_STATS
230#define PV_STAT(x)	do { x ; } while (0)
231#else
232#define PV_STAT(x)	do { } while (0)
233#endif
234
235/*
236 *  The boot_pt1 is used temporarily in the very early boot stage as the L1
237 *  page table.  We can init many things with no memory allocation thanks to
238 *  its static allocation and this brings two main advantages:
239 *  (1) other cores can be started very simply,
240 *  (2) various boot loaders can be supported as their arguments can be
241 *      processed in virtual address space and can be moved to a safe location
242 *      before the first allocation happens.
243 *  The only disadvantage is that boot_pt1 is used only in the very early boot
244 *  stage.  However, the table is uninitialized and so lies in BSS.  Therefore
245 *  the kernel image size is not influenced.
246 *
247 *  QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
248 *       CPU suspend/resume game.
249 */
250extern pt1_entry_t boot_pt1[];
251
252vm_paddr_t base_pt1;
253pt1_entry_t *kern_pt1;
254pt2_entry_t *kern_pt2tab;
255pt2_entry_t *PT2MAP;
256
257static uint32_t ttb_flags;
258static vm_memattr_t pt_memattr;
259ttb_entry_t pmap_kern_ttb;
260
261struct pmap kernel_pmap_store;
262LIST_HEAD(pmaplist, pmap);
263static struct pmaplist allpmaps;
264static struct mtx allpmaps_lock;
265
266vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
267vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
268
269static vm_offset_t kernel_vm_end_new;
270vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
271vm_offset_t vm_max_kernel_address;
272vm_paddr_t kernel_l1pa;
273
274static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;
275
276/*
277 *  Data for the pv entry allocation mechanism
278 */
279static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
280static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
280static struct md_page *pv_table; /* XXX: Is only the pv list in md_page used? */
282static int shpgperproc = PMAP_SHPGPERPROC;
283
284struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
285int pv_maxchunks;			/* How many chunks we have KVA for */
286vm_offset_t pv_vafree;			/* freelist stored in the PTE */
287
288vm_paddr_t first_managed_pa;
289#define	pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])
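/*
 *  Illustrative usage of pa_to_pvh() above: the pv head tracking the 1MB
 *  section which contains a managed page would be looked up as
 *
 *	struct md_page *pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 *
 *  (the real lookups live in the pv entry management code further below).
 */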
290
291/*
292 *  All those kernel PT submaps that BSD is so fond of
293 */
294static pt2_entry_t *CMAP3;
295static caddr_t CADDR3;
296caddr_t _tmppt = 0;
297
298struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */
299
300/*
301 *  Crashdump maps.
302 */
303static caddr_t crashdumpmap;
304
305static pt2_entry_t *PMAP1 = NULL, *PMAP2;
306static pt2_entry_t *PADDR1 = NULL, *PADDR2;
307#ifdef DDB
308static pt2_entry_t *PMAP3;
309static pt2_entry_t *PADDR3;
310static int PMAP3cpu __unused; /* for SMP only */
311#endif
312#ifdef SMP
313static int PMAP1cpu;
314static int PMAP1changedcpu;
315SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
316    &PMAP1changedcpu, 0,
317    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
318#endif
319static int PMAP1changed;
320SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
321    &PMAP1changed, 0,
322    "Number of times pmap_pte2_quick changed PMAP1");
323static int PMAP1unchanged;
324SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
325    &PMAP1unchanged, 0,
326    "Number of times pmap_pte2_quick didn't change PMAP1");
327static struct mtx PMAP2mutex;
328
329static __inline void pt2_wirecount_init(vm_page_t m);
330static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
331    vm_offset_t va);
332void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);
333
334/*
335 *  Function to set the debug level of the pmap code.
336 */
337#ifdef PMAP_DEBUG
338void
339pmap_debug(int level)
340{
341
342	pmap_debug_level = level;
343	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
344}
345#endif /* PMAP_DEBUG */
346
347/*
348 *  This table must correspond to the memory attribute configuration in vm.h.
349 *  The first entry is used for normal system mapping.
350 *
351 *  Device memory is always marked as shared.
352 *  Normal memory is shared only in the SMP case.
353 *  The not-outer-shareable (NOS) bits are not used yet.
354 *  Class 6 cannot be used on ARM11.
355 */
356#define TEXDEF_TYPE_SHIFT	0
357#define TEXDEF_TYPE_MASK	0x3
358#define TEXDEF_INNER_SHIFT	2
359#define TEXDEF_INNER_MASK	0x3
360#define TEXDEF_OUTER_SHIFT	4
361#define TEXDEF_OUTER_MASK	0x3
362#define TEXDEF_NOS_SHIFT	6
363#define TEXDEF_NOS_MASK		0x1
364
365#define TEX(t, i, o, s) 			\
366		(((t) << TEXDEF_TYPE_SHIFT) |	\
367		((i) << TEXDEF_INNER_SHIFT) |	\
368		((o) << TEXDEF_OUTER_SHIFT) |	\
369		((s) << TEXDEF_NOS_SHIFT))
370
371static uint32_t tex_class[8] = {
372/*	    type      inner cache outer cache */
373	TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0),  /* 0 - ATTR_WB_WA	*/
374	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 1 - ATTR_NOCACHE	*/
375	TEX(PRRR_DEV, NMRR_NC,	  NMRR_NC,    0),  /* 2 - ATTR_DEVICE	*/
376	TEX(PRRR_SO,  NMRR_NC,	  NMRR_NC,    0),  /* 3 - ATTR_SO	*/
377	TEX(PRRR_MEM, NMRR_WT,	  NMRR_WT,    0),  /* 4 - ATTR_WT	*/
378	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 5 - NOT USED YET	*/
379	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 6 - NOT USED YET	*/
380	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 7 - NOT USED YET	*/
381};
382#undef TEX
383
384static uint32_t pte2_attr_tab[8] = {
385	PTE2_ATTR_WB_WA,	/* 0 - VM_MEMATTR_WB_WA */
386	PTE2_ATTR_NOCACHE,	/* 1 - VM_MEMATTR_NOCACHE */
387	PTE2_ATTR_DEVICE,	/* 2 - VM_MEMATTR_DEVICE */
388	PTE2_ATTR_SO,		/* 3 - VM_MEMATTR_SO */
389	PTE2_ATTR_WT,		/* 4 - VM_MEMATTR_WRITE_THROUGH */
390	0,			/* 5 - NOT USED YET */
391	0,			/* 6 - NOT USED YET */
392	0			/* 7 - NOT USED YET */
393};
394CTASSERT(VM_MEMATTR_WB_WA == 0);
395CTASSERT(VM_MEMATTR_NOCACHE == 1);
396CTASSERT(VM_MEMATTR_DEVICE == 2);
397CTASSERT(VM_MEMATTR_SO == 3);
398CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);
399
400static inline uint32_t
401vm_memattr_to_pte2(vm_memattr_t ma)
402{
403
404	KASSERT((u_int)ma < 5, ("%s: bad vm_memattr_t %d", __func__, ma));
405	return (pte2_attr_tab[(u_int)ma]);
406}
407
408static inline uint32_t
409vm_page_pte2_attr(vm_page_t m)
410{
411
412	return (vm_memattr_to_pte2(m->md.pat_mode));
413}
414
415/*
416 * Convert TEX definition entry to TTB flags.
417 */
418static uint32_t
419encode_ttb_flags(int idx)
420{
421	uint32_t inner, outer, nos, reg;
422
423	inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
424		TEXDEF_INNER_MASK;
425	outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
426		TEXDEF_OUTER_MASK;
427	nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
428		TEXDEF_NOS_MASK;
429
430	reg = nos << 5;
431	reg |= outer << 3;
432	if (cpuinfo.coherent_walk)
433		reg |= (inner & 0x1) << 6;
434	reg |= (inner & 0x2) >> 1;
435#ifdef SMP
436	reg |= 1 << 1;
437#endif
438	return (reg);
439}
440
441/*
442 *  Set TEX remapping registers in current CPU.
443 */
444void
445pmap_set_tex(void)
446{
447	uint32_t prrr, nmrr;
448	uint32_t type, inner, outer, nos;
449	int i;
450
451#ifdef PMAP_PTE_NOCACHE
452	/* XXX fixme */
453	if (cpuinfo.coherent_walk) {
454		pt_memattr = VM_MEMATTR_WB_WA;
455		ttb_flags = encode_ttb_flags(0);
456	} else {
458		pt_memattr = VM_MEMATTR_NOCACHE;
459		ttb_flags = encode_ttb_flags(1);
460	}
461#else
462	pt_memattr = VM_MEMATTR_WB_WA;
463	ttb_flags = encode_ttb_flags(0);
464#endif
465
466	prrr = 0;
467	nmrr = 0;
468
469	/* Build remapping register from TEX classes. */
470	for (i = 0; i < 8; i++) {
471		type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
472			TEXDEF_TYPE_MASK;
473		inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
474			TEXDEF_INNER_MASK;
475		outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
476			TEXDEF_OUTER_MASK;
477		nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
478			TEXDEF_NOS_MASK;
479
480		prrr |= type  << (i * 2);
481		prrr |= nos   << (i + 24);
482		nmrr |= inner << (i * 2);
483		nmrr |= outer << (i * 2 + 16);
484	}
485	/* Add shareable bits for device memory. */
486	prrr |= PRRR_DS0 | PRRR_DS1;
487
488	/* Add shareable bits for normal memory in SMP case. */
489#ifdef SMP
490	prrr |= PRRR_NS1;
491#endif
492	cp15_prrr_set(prrr);
493	cp15_nmrr_set(nmrr);
494
495	/* Caches are disabled, so full TLB flush should be enough. */
496	tlb_flush_all_local();
497}
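/*
 *  Informative example of the PRRR/NMRR packing done in pmap_set_tex():
 *  for TEX class i, its memory type goes to PRRR[2*i+1:2*i], its NOS bit to
 *  PRRR[24+i], its inner cacheability to NMRR[2*i+1:2*i] and its outer
 *  cacheability to NMRR[2*i+17:2*i+16]. E.g. class 2 (ATTR_DEVICE) ends up
 *  in PRRR[5:4], PRRR[26], NMRR[5:4] and NMRR[21:20].
 */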
498
499/*
500 * Remap one vm_memattr class to another one. This can be useful as a
501 * workaround for SoC errata, e.g. if devices must be accessed using the
502 * SO memory class.
503 *
504 * !!! Please note that this function is an absolute last-resort measure.
505 * It should not be used under normal circumstances. !!!
506 *
507 * Usage rules:
508 * - it shall be called after pmap_bootstrap_prepare() and before
509 *   cpu_mp_start() (thus only on the boot CPU). In practice, it's expected
510 *   to be called from platform_attach() or platform_late_init().
511 *
512 * - if the remapping doesn't change the caching mode, or an uncached class
513 *   is remapped to any kind of cached one, then no other restriction exists.
514 *
515 * - if pmap_remap_vm_attr() changes the caching mode, but both (original and
516 *   remapped) remain cached, then the caller is responsible for calling
517 *   dcache_wbinv_poc_all().
518 *
519 * - remapping of any kind of cached class to uncached is not permitted.
520 */
521void
522pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr)
523{
524	int old_idx, new_idx;
525
526	/* Map VM memattrs to indexes to tex_class table. */
527	old_idx = pte2_attr_tab[(int)old_attr];
528	new_idx = pte2_attr_tab[(int)new_attr];
529
530	/* Replace TEX attribute and apply it. */
531	tex_class[old_idx] = tex_class[new_idx];
532	pmap_set_tex();
533}
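/*
 *  A hypothetical usage sketch of pmap_remap_vm_attr() (platform code not
 *  shown here): a SoC whose erratum requires strongly ordered accesses to
 *  device registers might call, from its platform_late_init() method,
 *
 *	pmap_remap_vm_attr(VM_MEMATTR_DEVICE, VM_MEMATTR_SO);
 *
 *  which is allowed by the rules above as both classes are uncached.
 */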
534
535/*
536 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
537 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
538 * PT2MAP meets the same constraint, as it is placed just below KERNBASE.
539 */
540CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
541CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);
542
543/*
544 *  In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
545 *  For now, anyhow, the following check must be fulfilled.
546 */
547CTASSERT(PAGE_SIZE == PTE2_SIZE);
548/*
549 *  We don't want to mess up MI code with all MMU and PMAP definitions,
550 *  so some things, which depend on other ones, are defined independently.
551 *  Now, it is time to check that we don't screw up something.
552 */
553CTASSERT(PDRSHIFT == PTE1_SHIFT);
554/*
555 *  Check L1 and L2 page table entries definitions consistency.
556 */
557CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
558CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
559/*
560 *  Check L2 page tables page consistency.
561 */
562CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
563CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
564/*
565 *  Check PT2TAB consistency.
566 *  PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
567 *  This should be done without remainder.
568 */
569CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));
570
571/*
572 *	A PT2MAP magic.
573 *
574 *  All level 2 page tables (PT2s) are mapped contiguously and in order
575 *  into the PT2MAP address space. As the PT2 size is less than PAGE_SIZE,
576 *  this can be done only if PAGE_SIZE is a multiple of the PT2 size. All
577 *  PT2s in one page must be used together, but not necessarily at once.
578 *  The first PT2 in a page must map a correctly aligned address and the
579 *  others must follow in the right order.
580 */
581#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
582#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
583#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)
584
585/*
586 *  Check PT2TAB consistency.
587 *  NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
588 *  NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
589 *  Both divisions must have no remainder.
590 */
591CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
592CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
593/*
594 *  The implementation was made general, however, with the assumption
595 *  below in mind. In case of another value of NPG_IN_PT2TAB,
596 *  the code should be rechecked once more.
597 */
598CTASSERT(NPG_IN_PT2TAB == 1);
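/*
 *  A worked example for the usual ARMv6/v7 short-descriptor geometry (the
 *  concrete constants live in the pmap headers, so the numbers here are
 *  only illustrative): with 4096 PTE1s in PT1, 256 four-byte PTE2s in a PT2
 *  (NB_IN_PT2 = 1KB) and PAGE_SIZE = 4KB, one page holds NPT2_IN_PG = 4
 *  PT2s, PT2TAB_ENTRIES = 4096 / 4 = 1024, NB_IN_PT2TAB = 1024 * 4 = 4KB,
 *  thus NPT2_IN_PT2TAB = 4 and NPG_IN_PT2TAB = 1, matching the asserts
 *  above. PT2MAP itself then spans 4096 * 1KB = 4MB of KVA.
 */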
599
600/*
601 *  Get offset of PT2 in a page
602 *  associated with given PT1 index.
603 */
604static __inline u_int
605page_pt2off(u_int pt1_idx)
606{
607
608	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
609}
610
611/*
612 *  Get physical address of PT2
613 *  associated with given PT2s page and PT1 index.
614 */
615static __inline vm_paddr_t
616page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
617{
618
619	return (pgpa + page_pt2off(pt1_idx));
620}
621
622/*
623 *  Get first entry of PT2
624 *  associated with given PT2s page and PT1 index.
625 */
626static __inline pt2_entry_t *
627page_pt2(vm_offset_t pgva, u_int pt1_idx)
628{
629
630	return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
631}
632
633/*
634 *  Get virtual address of PT2s page (mapped in PT2MAP)
635 *  which holds PT2 which holds entry which maps given virtual address.
636 */
637static __inline vm_offset_t
638pt2map_pt2pg(vm_offset_t va)
639{
640
641	va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
642	return ((vm_offset_t)pt2map_entry(va));
643}
644
645/*****************************************************************************
646 *
647 *     THREE pmap initialization milestones exist:
648 *
649 *  locore.S
650 *    -> fundamental init (including MMU) in ASM
651 *
652 *  initarm()
653 *    -> fundamental init continues in C
654 *    -> first available physical address is known
655 *
656 *    pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
657 *      -> basic (safe) interface for physical address allocation is made
658 *      -> basic (safe) interface for virtual mapping is made
659 *      -> limited, non-SMP-coherent work is possible
660 *
661 *    -> more fundamental init continues in C
662 *    -> locks and some more things are available
663 *    -> all fundamental allocations and mappings are done
664 *
665 *    pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
666 *      -> phys_avail[] and virtual_avail are set
667 *      -> control is passed to vm subsystem
668 *      -> physical and virtual address allocation are off limits
669 *      -> low-level mapping functions, some of them SMP coherent,
670 *         become available; they could not be used before this
671 *         point, when the vm subsystem was not yet being inited
672 *
673 *  mi_startup()
674 *    -> vm subsystem is being inited
675 *
676 *      pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
677 *        -> pmap is fully inited
678 *
679 *****************************************************************************/
680
681/*****************************************************************************
682 *
683 *	PMAP first stage initialization and utility functions
684 *	for pre-bootstrap epoch.
685 *
686 *  After pmap_bootstrap_prepare() is called, the following functions
687 *  can be used:
688 *
689 *  (1) functions strictly for this stage only, for physical page allocations,
690 *      virtual space allocations, and mappings:
691 *
692 *  vm_paddr_t pmap_preboot_get_pages(u_int num);
693 *  void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
694 *  vm_offset_t pmap_preboot_reserve_pages(u_int num);
695 *  vm_offset_t pmap_preboot_get_vpages(u_int num);
696 *  void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
697 *      vm_prot_t prot, vm_memattr_t attr);
698 *
699 *  (2) for all stages:
700 *
701 *  vm_paddr_t pmap_kextract(vm_offset_t va);
702 *
703 *  NOTE: This stage is not SMP coherent.
704 *
705 *****************************************************************************/
706
707#define KERNEL_P2V(pa) \
708    ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
709#define KERNEL_V2P(va) \
710    ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))
711
712static vm_paddr_t last_paddr;
713
714/*
715 *  Pre-bootstrap epoch page allocator.
716 */
717vm_paddr_t
718pmap_preboot_get_pages(u_int num)
719{
720	vm_paddr_t ret;
721
722	ret = last_paddr;
723	last_paddr += num * PAGE_SIZE;
724
725	return (ret);
726}
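/*
 *  Illustrative only: pmap_preboot_get_pages() is a simple bump allocator,
 *  so a caller in this stage typically pairs it with the preboot mapping
 *  helpers defined below, e.g.
 *
 *	vm_paddr_t pa = pmap_preboot_get_pages(2);
 *	pmap_preboot_map_pages(pa, va, 2);
 *
 *  where 'va' stands for a virtual range obtained earlier, for instance
 *  from pmap_preboot_reserve_pages().
 */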
727
728/*
729 *	The fundamental initialization of PMAP stuff.
730 *
731 *  Some things already happened in locore.S and some things could happen
732 *  before pmap_bootstrap_prepare() is called, so let's recall what is done:
733 *  1. Caches are disabled.
734 *  2. We are running on virtual addresses already with 'boot_pt1'
735 *     as L1 page table.
736 *  3. So far, all virtual addresses can be converted to physical ones and
737 *     vice versa by the following macros:
738 *       KERNEL_P2V(pa) .... physical to virtual ones,
739 *       KERNEL_V2P(va) .... virtual to physical ones.
740 *
741 *  What is done herein:
742 *  1. The 'boot_pt1' is replaced by real kernel L1 page table 'kern_pt1'.
743 *  2. PT2MAP magic is brought to life.
744 *  3. Basic preboot functions for page allocations and mappings can be used.
745 *  4. Everything is prepared for L1 cache enabling.
746 *
747 *  Variations:
748 *  1. Use the second TTB register, so kernel and user page tables will be
749 *     separated. This way process forking - pmap_pinit() - could be faster,
750 *     as it saves physical pages and KVA per process, and it's a simple change.
751 *     However, due to the hardware design, it will lead to the following:
752 *     (a) 2G space for the kernel and 2G space for users.
753 *     (b) 1G space for the kernel in low addresses and 3G for users above it.
754 *     The question is: Is case (b) really an option? Note that case (b)
755 *     saves neither physical memory nor KVA.
756 */
757void
758pmap_bootstrap_prepare(vm_paddr_t last)
759{
760	vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size;
761	vm_offset_t pt2pg_va;
762	pt1_entry_t *pte1p;
763	pt2_entry_t *pte2p;
764	u_int i;
765	uint32_t l1_attr;
766
767	/*
768	 * Now, we are going to make the real kernel mapping. Note that we are
769	 * already running on some mapping made in locore.S and we expect
770	 * that it's large enough to ensure nofault access to the physical memory
771	 * allocated herein before the switch.
772	 *
773	 * As the kernel image and everything needed so far is and will be mapped
774	 * by section mappings, we align the last physical address to PTE1_SIZE.
775	 */
776	last_paddr = pte1_roundup(last);
777
778	/*
779	 * Allocate and zero page(s) for kernel L1 page table.
780	 *
781	 * Note that it's first allocation on space which was PTE1_SIZE
782	 * aligned and as such base_pt1 is aligned to NB_IN_PT1 too.
783	 */
784	base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1);
785	kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1);
786	bzero((void*)kern_pt1, NB_IN_PT1);
787	pte1_sync_range(kern_pt1, NB_IN_PT1);
788
789	/* Allocate and zero page(s) for kernel PT2TAB. */
790	pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB);
791	kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa);
792	bzero(kern_pt2tab, NB_IN_PT2TAB);
793	pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB);
794
795	/* Allocate and zero page(s) for kernel L2 page tables. */
796	pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
797	pt2pg_va = KERNEL_P2V(pt2pg_pa);
798	size = NKPT2PG * PAGE_SIZE;
799	bzero((void*)pt2pg_va, size);
800	pte2_sync_range((pt2_entry_t *)pt2pg_va, size);
801
802	/*
803	 * Add a physical memory segment (vm_phys_seg) corresponding to the
804	 * preallocated pages for kernel L2 page tables so that vm_page
805	 * structures representing these pages will be created. The vm_page
806	 * structures are required for promotion of the corresponding kernel
807	 * virtual addresses to section mappings.
808	 */
809	vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));
810
811	/*
812	 * Insert allocated L2 page table pages to PT2TAB and make
813	 * link to all PT2s in L1 page table. See how kernel_vm_end
814	 * is initialized.
815	 *
816	 * We play it simple and safe. So every KVA will have an underlying
817	 * L2 page table, even the kernel image mapped by sections.
818	 */
819	pte2p = kern_pt2tab_entry(KERNBASE);
820	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
821		pt2tab_store(pte2p++, PTE2_KPT(pa));
822
823	pte1p = kern_pte1(KERNBASE);
824	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
825		pte1_store(pte1p++, PTE1_LINK(pa));
826
827	/* Make section mappings for kernel. */
828	l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
829	pte1p = kern_pte1(KERNBASE);
830	for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
831		pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr));
832
833	/*
834	 * Get free and aligned space for PT2MAP and make L1 page table links
835	 * to L2 page tables held in PT2TAB.
836	 *
837	 * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
838	 * descriptors and PT2TAB page(s) itself is(are) used as PT2s. Thus
839	 * each entry in PT2TAB maps all PT2s in a page. This implies that
840	 * virtual address of PT2MAP must be aligned to NPT2_IN_PG * PTE1_SIZE.
841	 */
842	PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
843	pte1p = kern_pte1((vm_offset_t)PT2MAP);
844	for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
845		pte1_store(pte1p++, PTE1_LINK(pa));
846	}
847
848	/*
849	 * Store PT2TAB in PT2TAB itself, i.e. a self-reference mapping.
850	 * Each pmap will hold its own PT2TAB, so the mapping should not be global.
851	 */
852	pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
853	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
854		pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
855	}
856
857	/*
858	 * Choose the correct L2 page table and make mappings for allocations
859	 * made herein, which will replace the temporary locore.S mappings after
860	 * a while.  Note that PT2MAP cannot be used until we switch to kern_pt1.
861	 *
862	 * Note that these allocations started aligned on a 1M section and the
863	 * kernel PT1 was allocated first.  The mappings must be made in the same
864	 * order as the physical allocations as we've used the KERNEL_P2V() macro
865	 * for virtual address resolution.
866	 */
867	pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
868	pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));
869
870	pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));
871
872	/* Make mapping for kernel L1 page table. */
873	for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
874		pte2_store(pte2p++, PTE2_KPT(pa));
875
876	/* Make mapping for kernel PT2TAB. */
877	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
878		pte2_store(pte2p++, PTE2_KPT(pa));
879
880	/* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
881	pmap_kern_ttb = base_pt1 | ttb_flags;
882	cpuinfo_reinit_mmu(pmap_kern_ttb);
883	/*
884	 * Initialize the first available KVA. As kernel image is mapped by
885	 * sections, we are leaving some gap behind.
886	 */
887	virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
888}
889
890/*
891 *  Setup L2 page table page for given KVA.
892 *  Used in pre-bootstrap epoch.
893 *
894 *  Note that we have allocated NKPT2PG pages for L2 page tables in advance
895 *  and used them for mapping KVA starting from KERNBASE. However, this is not
896 *  enough. Vectors and devices need L2 page tables too. Note that they are
897 *  even above VM_MAX_KERNEL_ADDRESS.
898 */
899static __inline vm_paddr_t
900pmap_preboot_pt2pg_setup(vm_offset_t va)
901{
902	pt2_entry_t *pte2p, pte2;
903	vm_paddr_t pt2pg_pa;
904
905	/* Get associated entry in PT2TAB. */
906	pte2p = kern_pt2tab_entry(va);
907
908	/* Just return, if PT2s page exists already. */
909	pte2 = pt2tab_load(pte2p);
910	if (pte2_is_valid(pte2))
911		return (pte2_pa(pte2));
912
913	KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
914	    ("%s: NKPT2PG too small", __func__));
915
916	/*
917	 * Allocate page for PT2s and insert it to PT2TAB.
918	 * In other words, map it into PT2MAP space.
919	 */
920	pt2pg_pa = pmap_preboot_get_pages(1);
921	pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));
922
923	/* Zero all PT2s in allocated page. */
924	bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
925	pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);
926
927	return (pt2pg_pa);
928}
929
930/*
931 *  Setup L2 page table for given KVA.
932 *  Used in pre-bootstrap epoch.
933 */
934static void
935pmap_preboot_pt2_setup(vm_offset_t va)
936{
937	pt1_entry_t *pte1p;
938	vm_paddr_t pt2pg_pa, pt2_pa;
939
940	/* Setup PT2's page. */
941	pt2pg_pa = pmap_preboot_pt2pg_setup(va);
942	pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));
943
944	/* Insert PT2 to PT1. */
945	pte1p = kern_pte1(va);
946	pte1_store(pte1p, PTE1_LINK(pt2_pa));
947}
948
949/*
950 *  Get L2 page entry associated with given KVA.
951 *  Used in pre-bootstrap epoch.
952 */
953static __inline pt2_entry_t*
954pmap_preboot_vtopte2(vm_offset_t va)
955{
956	pt1_entry_t *pte1p;
957
958	/* Setup PT2 if needed. */
959	pte1p = kern_pte1(va);
960	if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
961		pmap_preboot_pt2_setup(va);
962
963	return (pt2map_entry(va));
964}
965
966/*
967 *  Pre-bootstrap epoch page(s) mapping(s).
968 */
969void
970pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
971{
972	u_int i;
973	pt2_entry_t *pte2p;
974
975	/* Map all the pages. */
976	for (i = 0; i < num; i++) {
977		pte2p = pmap_preboot_vtopte2(va);
978		pte2_store(pte2p, PTE2_KRW(pa));
979		va += PAGE_SIZE;
980		pa += PAGE_SIZE;
981	}
982}
983
984/*
985 *  Pre-bootstrap epoch virtual space allocator.
986 */
987vm_offset_t
988pmap_preboot_reserve_pages(u_int num)
989{
990	u_int i;
991	vm_offset_t start, va;
992	pt2_entry_t *pte2p;
993
994	/* Allocate virtual space. */
995	start = va = virtual_avail;
996	virtual_avail += num * PAGE_SIZE;
997
998	/* Zero the mapping. */
999	for (i = 0; i < num; i++) {
1000		pte2p = pmap_preboot_vtopte2(va);
1001		pte2_store(pte2p, 0);
1002		va += PAGE_SIZE;
1003	}
1004
1005	return (start);
1006}
1007
1008/*
1009 *  Pre-bootstrap epoch page(s) allocation and mapping(s).
1010 */
1011vm_offset_t
1012pmap_preboot_get_vpages(u_int num)
1013{
1014	vm_paddr_t  pa;
1015	vm_offset_t va;
1016
1017	/* Allocate physical page(s). */
1018	pa = pmap_preboot_get_pages(num);
1019
1020	/* Allocate virtual space. */
1021	va = virtual_avail;
1022	virtual_avail += num * PAGE_SIZE;
1023
1024	/* Map and zero all. */
1025	pmap_preboot_map_pages(pa, va, num);
1026	bzero((void *)va, num * PAGE_SIZE);
1027
1028	return (va);
1029}
1030
1031/*
1032 *  Pre-bootstrap epoch page mapping(s) with attributes.
1033 */
1034void
1035pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
1036    vm_prot_t prot, vm_memattr_t attr)
1037{
1038	u_int num;
1039	u_int l1_attr, l1_prot, l2_prot, l2_attr;
1040	pt1_entry_t *pte1p;
1041	pt2_entry_t *pte2p;
1042
1043	l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR;
1044	l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
1045	l2_attr = vm_memattr_to_pte2(attr);
1046	l1_prot = ATTR_TO_L1(l2_prot);
1047	l1_attr = ATTR_TO_L1(l2_attr);
1048
1049	/* Map all the pages. */
1050	num = round_page(size);
1051	while (num > 0) {
1052		if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) {
1053			pte1p = kern_pte1(va);
1054			pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr));
1055			va += PTE1_SIZE;
1056			pa += PTE1_SIZE;
1057			num -= PTE1_SIZE;
1058		} else {
1059			pte2p = pmap_preboot_vtopte2(va);
1060			pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr));
1061			va += PAGE_SIZE;
1062			pa += PAGE_SIZE;
1063			num -= PAGE_SIZE;
1064		}
1065	}
1066}
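/*
 *  A hypothetical example of pmap_preboot_map_attr() (addresses are made
 *  up): early platform code can map a device register window before the
 *  VM subsystem is up with
 *
 *	pmap_preboot_map_attr(0x40000000, va, 0x00100000,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
 *
 *  where 'va' was reserved beforehand; 1MB aligned chunks are mapped by
 *  sections, the rest by small pages.
 */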
1067
1068/*
1069 *  Extract from the kernel page table the physical address
1070 *  that is mapped by the given virtual address "va".
1071 */
1072vm_paddr_t
1073pmap_kextract(vm_offset_t va)
1074{
1075	vm_paddr_t pa;
1076	pt1_entry_t pte1;
1077	pt2_entry_t pte2;
1078
1079	pte1 = pte1_load(kern_pte1(va));
1080	if (pte1_is_section(pte1)) {
1081		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1082	} else if (pte1_is_link(pte1)) {
1083		/*
1084		 * We should beware of a concurrent promotion that changes
1085		 * pte1 at this point. However, it's not a problem as the PT2
1086		 * page is preserved by the promotion in PT2TAB. So even if
1087		 * it happens, using PT2MAP is still safe.
1088		 *
1089		 * QQQ: However, concurrent removal is a problem which
1090		 *      ends in an abort on the PT2MAP space. Locking must be
1091		 *      used to deal with this.
1092		 */
1093		pte2 = pte2_load(pt2map_entry(va));
1094		pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
1095	} else {
1097		panic("%s: va %#x pte1 %#x", __func__, va, pte1);
1098	}
1099	return (pa);
1100}
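/*
 *  Illustrative usage of pmap_kextract(): for any valid kernel mapping,
 *  e.g. an object inside the kernel image,
 *
 *	vm_paddr_t pa = pmap_kextract((vm_offset_t)&kernel_pmap_store);
 *
 *  returns the physical address backing the virtual address; an unmapped
 *  address panics as coded above.
 */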
1101
1102/*
1103 *  Extract from the kernel page table the physical address
1104 *  that is mapped by the given virtual address "va". Also
1105 *  return L2 page table entry which maps the address.
1106 *
1107 *  This is only intended to be used for panic dumps.
1108 */
1109vm_paddr_t
1110pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
1111{
1112	vm_paddr_t pa;
1113	pt1_entry_t pte1;
1114	pt2_entry_t pte2;
1115
1116	pte1 = pte1_load(kern_pte1(va));
1117	if (pte1_is_section(pte1)) {
1118		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1119		pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V;
1120	} else if (pte1_is_link(pte1)) {
1121		pte2 = pte2_load(pt2map_entry(va));
1122		pa = pte2_pa(pte2);
1123	} else {
1124		pte2 = 0;
1125		pa = 0;
1126	}
1127	if (pte2p != NULL)
1128		*pte2p = pte2;
1129	return (pa);
1130}
1131
1132/*****************************************************************************
1133 *
1134 *	PMAP second stage initialization and utility functions
1135 *	for bootstrap epoch.
1136 *
1137 *  After pmap_bootstrap() is called, the following functions for
1138 *  mappings can be used:
1139 *
1140 *  void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
1141 *  void pmap_kremove(vm_offset_t va);
1142 *  vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
1143 *      int prot);
1144 *
1145 *  NOTE: This stage is not SMP coherent, and physical page allocation is
1146 *        not allowed during it.
1147 *
1148 *****************************************************************************/
1149
1150/*
1151 *  Initialize kernel PMAP locks and lists, kernel_pmap itself, and
1152 *  reserve various virtual spaces for temporary mappings.
1153 */
1154void
1155pmap_bootstrap(vm_offset_t firstaddr)
1156{
1157	pt2_entry_t *unused __unused;
1158	struct pcpu *pc;
1159
1160	/*
1161	 * Initialize the kernel pmap (which is statically allocated).
1162	 */
1163	PMAP_LOCK_INIT(kernel_pmap);
1164	kernel_l1pa = (vm_paddr_t)kern_pt1;  /* for libkvm */
1165	kernel_pmap->pm_pt1 = kern_pt1;
1166	kernel_pmap->pm_pt2tab = kern_pt2tab;
1167	CPU_FILL(&kernel_pmap->pm_active);  /* don't allow deactivation */
1168	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1169
1170	/*
1171	 * Initialize the global pv list lock.
1172	 */
1173	rw_init(&pvh_global_lock, "pmap pv global");
1174
1175	LIST_INIT(&allpmaps);
1176
1177	/*
1178	 * Request a spin mutex so that changes to allpmaps cannot be
1179	 * preempted by smp_rendezvous_cpus().
1180	 */
1181	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
1182	mtx_lock_spin(&allpmaps_lock);
1183	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
1184	mtx_unlock_spin(&allpmaps_lock);
1185
1186	/*
1187	 * Reserve some special page table entries/VA space for temporary
1188	 * mapping of pages.
1189	 */
1190#define	SYSMAP(c, p, v, n)  do {		\
1191	v = (c)pmap_preboot_reserve_pages(n);	\
1192	p = pt2map_entry((vm_offset_t)v);	\
1193	} while (0)
1194
1195	/*
1196	 * Local CMAP1/CMAP2 are used for zeroing and copying pages.
1197	 * Local CMAP2 is also used for data cache cleaning.
1198	 * Global CMAP3 is used for the idle process page zeroing.
1199	 */
1200	pc = get_pcpu();
1201	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
1202	SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1);
1203	SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1);
1204	SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1);
1205	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
1206
1207	/*
1208	 * Crashdump maps.
1209	 */
1210	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS);
1211
1212	/*
1213	 * _tmppt is used for reading arbitrary physical pages via /dev/mem.
1214	 */
1215	SYSMAP(caddr_t, unused, _tmppt, 1);
1216
1217	/*
1218	 * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(),
1219	 * respectively. PADDR3 is used by pmap_pte2_ddb().
1220	 */
1221	SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1);
1222	SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1);
1223#ifdef DDB
1224	SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1);
1225#endif
1226	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
1227
1228	/*
1229	 * Note that very shortly, in initarm(), we are going to initialize
1230	 * the phys_avail[] array and no further page allocation can happen
1231	 * after that until the vm subsystem is initialized.
1232	 */
1233	kernel_vm_end_new = kernel_vm_end;
1234	virtual_end = vm_max_kernel_address;
1235}
1236
1237static void
1238pmap_init_reserved_pages(void)
1239{
1240	struct pcpu *pc;
1241	vm_offset_t pages;
1242	int i;
1243
1244	CPU_FOREACH(i) {
1245		pc = pcpu_find(i);
1246		/*
1247		 * Skip if the mapping has already been initialized,
1248		 * i.e. this is the BSP.
1249		 */
1250		if (pc->pc_cmap1_addr != 0)
1251			continue;
1252		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
1253		pages = kva_alloc(PAGE_SIZE * 3);
1254		if (pages == 0)
1255			panic("%s: unable to allocate KVA", __func__);
1256		pc->pc_cmap1_pte2p = pt2map_entry(pages);
1257		pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE);
1258		pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2));
1259		pc->pc_cmap1_addr = (caddr_t)pages;
1260		pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE);
1261		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
1262	}
1263}
1264SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);
1265
1266/*
1267 *  The function can already be used in the second initialization stage.
1268 *  As such, the function DOES NOT call pmap_growkernel() where PT2
1269 *  allocation can happen. So if used, be sure that the PT2 for the given
1270 *  virtual address is already allocated!
1271 *
1272 *  Add a wired page to the kva.
1273 *  Note: not SMP coherent.
1274 */
1275static __inline void
1276pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot,
1277    uint32_t attr)
1278{
1279	pt1_entry_t *pte1p;
1280	pt2_entry_t *pte2p;
1281
1282	pte1p = kern_pte1(va);
1283	if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */
1284		/*
1285		 * This is a very low level function, so the PT2 and particularly
1286		 * the PT2PG associated with the given virtual address must already
1287		 * be allocated. It's a pain mainly during the pmap initialization
1288		 * stage. However, calling it after pmap initialization with a
1289		 * virtual address not below kernel_vm_end will lead to
1290		 * the same misery.
1291		 */
1292		if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va))))
1293			panic("%s: kernel PT2 not allocated!", __func__);
1294	}
1295
1296	pte2p = pt2map_entry(va);
1297	pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
1298}
1299
1300PMAP_INLINE void
1301pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1302{
1303
1304	pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT);
1305}
1306
1307/*
1308 *  Remove a page from the kernel pagetables.
1309 *  Note: not SMP coherent.
1310 */
1311PMAP_INLINE void
1312pmap_kremove(vm_offset_t va)
1313{
1314	pt1_entry_t *pte1p;
1315	pt2_entry_t *pte2p;
1316
1317	pte1p = kern_pte1(va);
1318	if (pte1_is_section(pte1_load(pte1p))) {
1319		pte1_clear(pte1p);
1320	} else {
1321		pte2p = pt2map_entry(va);
1322		pte2_clear(pte2p);
1323	}
1324}
1325
1326/*
1327 *  Share new kernel PT2PG with all pmaps.
1328 *  The caller is responsible for maintaining TLB consistency.
1329 */
1330static void
1331pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2)
1332{
1333	pmap_t pmap;
1334	pt2_entry_t *pte2p;
1335
1336	mtx_lock_spin(&allpmaps_lock);
1337	LIST_FOREACH(pmap, &allpmaps, pm_list) {
1338		pte2p = pmap_pt2tab_entry(pmap, va);
1339		pt2tab_store(pte2p, npte2);
1340	}
1341	mtx_unlock_spin(&allpmaps_lock);
1342}
1343
1344/*
1345 *  Share new kernel PTE1 with all pmaps.
1346 *  The caller is responsible for maintaining TLB consistency.
1347 */
1348static void
1349pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1)
1350{
1351	pmap_t pmap;
1352	pt1_entry_t *pte1p;
1353
1354	mtx_lock_spin(&allpmaps_lock);
1355	LIST_FOREACH(pmap, &allpmaps, pm_list) {
1356		pte1p = pmap_pte1(pmap, va);
1357		pte1_store(pte1p, npte1);
1358	}
1359	mtx_unlock_spin(&allpmaps_lock);
1360}
1361
1362/*
1363 *  Used to map a range of physical addresses into kernel
1364 *  virtual address space.
1365 *
1366 *  The value passed in '*virt' is a suggested virtual address for
1367 *  the mapping. Architectures which can support a direct-mapped
1368 *  physical to virtual region can return the appropriate address
1369 *  within that region, leaving '*virt' unchanged. Other
1370 *  architectures should map the pages starting at '*virt' and
1371 *  update '*virt' with the first usable address after the mapped
1372 *  region.
1373 *
1374 *  NOTE: Read the comments above pmap_kenter_prot_attr() as
1375 *        the function is used herein!
1376 */
1377vm_offset_t
1378pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1379{
1380	vm_offset_t va, sva;
1381	vm_paddr_t pte1_offset;
1382	pt1_entry_t npte1;
1383	uint32_t l1prot, l2prot;
1384	uint32_t l1attr, l2attr;
1385
1386	PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x),"
1387	    " prot = %d\n", __func__, *virt, start, end, end - start,  prot));
1388
1389	l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
1390	l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
1391	l1prot = ATTR_TO_L1(l2prot);
1392
1393	l2attr = PTE2_ATTR_DEFAULT;
1394	l1attr = ATTR_TO_L1(l2attr);
1395
1396	va = *virt;
1397	/*
1398	 * Does the physical address range's size and alignment permit at
1399	 * least one section mapping to be created?
1400	 */
1401	pte1_offset = start & PTE1_OFFSET;
1402	if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >=
1403	    PTE1_SIZE) {
1404		/*
1405		 * Increase the starting virtual address so that its alignment
1406		 * does not preclude the use of section mappings.
1407		 */
1408		if ((va & PTE1_OFFSET) < pte1_offset)
1409			va = pte1_trunc(va) + pte1_offset;
1410		else if ((va & PTE1_OFFSET) > pte1_offset)
1411			va = pte1_roundup(va) + pte1_offset;
1412	}
1413	sva = va;
1414	while (start < end) {
1415		if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) {
1416			KASSERT((va & PTE1_OFFSET) == 0,
1417			    ("%s: misaligned va %#x", __func__, va));
1418			npte1 = PTE1_KERN(start, l1prot, l1attr);
1419			pmap_kenter_pte1(va, npte1);
1420			va += PTE1_SIZE;
1421			start += PTE1_SIZE;
1422		} else {
1423			pmap_kenter_prot_attr(va, start, l2prot, l2attr);
1424			va += PAGE_SIZE;
1425			start += PAGE_SIZE;
1426		}
1427	}
1428	tlb_flush_range(sva, va - sva);
1429	*virt = va;
1430	return (sva);
1431}
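/*
 *  A sketch of pmap_map() with made-up addresses: during bootstrap a 2MB
 *  physical window can be mapped read/write into KVA as
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva = pmap_map(&va, 0x80000000, 0x80200000,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 *
 *  and, as both ends are 1MB aligned here, the whole range is expected to
 *  be covered by two section mappings.
 */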
1432
1433/*
1434 *  Make a temporary mapping for a physical address.
1435 *  This is only intended to be used for panic dumps.
1436 */
1437void *
1438pmap_kenter_temporary(vm_paddr_t pa, int i)
1439{
1440	vm_offset_t va;
1441
1442	/* QQQ: 'i' should be less than or equal to MAXDUMPPGS. */
1443
1444	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
1445	pmap_kenter(va, pa);
1446	tlb_flush_local(va);
1447	return ((void *)crashdumpmap);
1448}
1449
1450
1451/*************************************
1452 *
1453 *  TLB & cache maintenance routines.
1454 *
1455 *************************************/
1456
1457/*
1458 *  We inline these within pmap.c for speed.
1459 */
1460PMAP_INLINE void
1461pmap_tlb_flush(pmap_t pmap, vm_offset_t va)
1462{
1463
1464	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1465		tlb_flush(va);
1466}
1467
1468PMAP_INLINE void
1469pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size)
1470{
1471
1472	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1473		tlb_flush_range(sva, size);
1474}
1475
1476/*
1477 *  Abuse the pte2 nodes for unmapped kva to thread a kva freelist through.
1478 *  Requirements:
1479 *   - Must deal with pages in order to ensure that none of the PTE2_* bits
1480 *     are ever set, PTE2_V in particular.
1481 *   - Assumes we can write to pte2s without pte2_store() atomic ops.
1482 *   - Assumes nothing will ever test these addresses for 0 to indicate
1483 *     no mapping instead of correctly checking PTE2_V.
1484 *   - Assumes a vm_offset_t will fit in a pte2 (true for arm).
1485 *  Because PTE2_V is never set, there can be no mappings to invalidate.
1486 */
1487static vm_offset_t
1488pmap_pte2list_alloc(vm_offset_t *head)
1489{
1490	pt2_entry_t *pte2p;
1491	vm_offset_t va;
1492
1493	va = *head;
1494	if (va == 0)
1495		panic("%s: exhausted pte2list KVA", __func__);
1496	pte2p = pt2map_entry(va);
1497	*head = *pte2p;
1498	if (*head & PTE2_V)
1499		panic("%s: va with PTE2_V set!", __func__);
1500	*pte2p = 0;
1501	return (va);
1502}
1503
1504static void
1505pmap_pte2list_free(vm_offset_t *head, vm_offset_t va)
1506{
1507	pt2_entry_t *pte2p;
1508
1509	if (va & PTE2_V)
1510		panic("%s: freeing va with PTE2_V set!", __func__);
1511	pte2p = pt2map_entry(va);
1512	*pte2p = *head;		/* virtual! PTE2_V is 0 though */
1513	*head = va;
1514}
1515
1516static void
1517pmap_pte2list_init(vm_offset_t *head, void *base, int npages)
1518{
1519	int i;
1520	vm_offset_t va;
1521
1522	*head = 0;
1523	for (i = npages - 1; i >= 0; i--) {
1524		va = (vm_offset_t)base + i * PAGE_SIZE;
1525		pmap_pte2list_free(head, va);
1526	}
1527}
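/*
 *  Illustrative only: pmap_init() below threads the pv chunk KVA through
 *  this freelist (pv_vafree), and the pv entry code consumes it roughly as
 *
 *	vm_offset_t va = pmap_pte2list_alloc(&pv_vafree);
 *	... use va as the KVA of one pv_chunk page ...
 *	pmap_pte2list_free(&pv_vafree, va);
 */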
1528
1529/*****************************************************************************
1530 *
1531 *	PMAP third and final stage initialization.
1532 *
1533 *  After pmap_init() is called, PMAP subsystem is fully initialized.
1534 *
1535 *****************************************************************************/
1536
1537SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
1538
1539SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
1540    "Max number of PV entries");
1541SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
1542    "Page share factor per proc");
1543
1544static u_long nkpt2pg = NKPT2PG;
1545SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD,
1546    &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s");
1547
1548static int sp_enabled = 1;
1549SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
1550    &sp_enabled, 0, "Are large page mappings enabled?");
1551
1552static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD, 0,
1553    "1MB page mapping counters");
1554
1555static u_long pmap_pte1_demotions;
1556SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD,
1557    &pmap_pte1_demotions, 0, "1MB page demotions");
1558
1559static u_long pmap_pte1_mappings;
1560SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD,
1561    &pmap_pte1_mappings, 0, "1MB page mappings");
1562
1563static u_long pmap_pte1_p_failures;
1564SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD,
1565    &pmap_pte1_p_failures, 0, "1MB page promotion failures");
1566
1567static u_long pmap_pte1_promotions;
1568SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
1569    &pmap_pte1_promotions, 0, "1MB page promotions");
1570
1571static u_long pmap_pte1_kern_demotions;
1572SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
1573    &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");
1574
1575static u_long pmap_pte1_kern_promotions;
1576SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
1577    &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");
1578
1579static __inline ttb_entry_t
1580pmap_ttb_get(pmap_t pmap)
1581{
1582
1583	return (vtophys(pmap->pm_pt1) | ttb_flags);
1584}
1585
1586/*
1587 *  Initialize a vm_page's machine-dependent fields.
1588 *
1589 *  Variations:
1590 *  1. Pages for L2 page tables are never managed. So, pv_list and
1591 *     pt2_wirecount can share the same physical space. However, proper
1592 *     initialization on a page alloc for page tables and reinitialization
1593 *     on the page free must be ensured.
1594 */
1595void
1596pmap_page_init(vm_page_t m)
1597{
1598
1599	TAILQ_INIT(&m->md.pv_list);
1600	pt2_wirecount_init(m);
1601	m->md.pat_mode = VM_MEMATTR_DEFAULT;
1602}
1603
1604/*
1605 *  Virtualization for a faster way to zero a whole page.
1606 */
1607static __inline void
1608pagezero(void *page)
1609{
1610
1611	bzero(page, PAGE_SIZE);
1612}
1613
1614/*
1615 *  Zero L2 page table page.
1616 *  Use same KVA as in pmap_zero_page().
1617 */
1618static __inline vm_paddr_t
1619pmap_pt2pg_zero(vm_page_t m)
1620{
1621	pt2_entry_t *cmap2_pte2p;
1622	vm_paddr_t pa;
1623	struct pcpu *pc;
1624
1625	pa = VM_PAGE_TO_PHYS(m);
1626
1627	/*
1628	 * XXX: For now, we map whole page even if it's already zero,
1629	 *      to sync it even if the sync is only DSB.
1630	 */
1631	sched_pin();
1632	pc = get_pcpu();
1633	cmap2_pte2p = pc->pc_cmap2_pte2p;
1634	mtx_lock(&pc->pc_cmap_lock);
1635	if (pte2_load(cmap2_pte2p) != 0)
1636		panic("%s: CMAP2 busy", __func__);
1637	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
1638	    vm_page_pte2_attr(m)));
1639	/*  Even VM_ALLOC_ZERO request is only advisory. */
1640	if ((m->flags & PG_ZERO) == 0)
1641		pagezero(pc->pc_cmap2_addr);
1642	pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE);
1643	pte2_clear(cmap2_pte2p);
1644	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
1645
1646	/*
1647	 * Unpin the thread before releasing the lock.  Otherwise the thread
1648	 * could be rescheduled while still bound to the current CPU, only
1649	 * to unpin itself immediately upon resuming execution.
1650	 */
1651	sched_unpin();
1652	mtx_unlock(&pc->pc_cmap_lock);
1653
1654	return (pa);
1655}
1656
1657/*
1658 *  Init just allocated page as L2 page table(s) holder
1659 *  and return its physical address.
1660 */
1661static __inline vm_paddr_t
1662pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m)
1663{
1664	vm_paddr_t pa;
1665	pt2_entry_t *pte2p;
1666
1667	/* Check page attributes. */
1668	if (m->md.pat_mode != pt_memattr)
1669		pmap_page_set_memattr(m, pt_memattr);
1670
1671	/* Zero page and init wire counts. */
1672	pa = pmap_pt2pg_zero(m);
1673	pt2_wirecount_init(m);
1674
1675	/*
1676	 * Map page to PT2MAP address space for given pmap.
1677	 * Note that PT2MAP space is shared with all pmaps.
1678	 */
1679	if (pmap == kernel_pmap)
1680		pmap_kenter_pt2tab(va, PTE2_KPT(pa));
1681	else {
1682		pte2p = pmap_pt2tab_entry(pmap, va);
1683		pt2tab_store(pte2p, PTE2_KPT_NG(pa));
1684	}
1685
1686	return (pa);
1687}
1688
1689/*
1690 *  Initialize the pmap module.
1691 *  Called by vm_init, to initialize any structures that the pmap
1692 *  system needs to map virtual memory.
1693 */
1694void
1695pmap_init(void)
1696{
1697	vm_size_t s;
1698	pt2_entry_t *pte2p, pte2;
1699	u_int i, pte1_idx, pv_npg;
1700
1701	PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR));
1702
1703	/*
1704	 * Initialize the vm page array entries for kernel pmap's
1705	 * L2 page table pages allocated in advance.
1706	 */
1707	pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE);
1708	pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE);
1709	for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) {
1710		vm_paddr_t pa;
1711		vm_page_t m;
1712
1713		pte2 = pte2_load(pte2p);
1714		KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__));
1715
1716		pa = pte2_pa(pte2);
1717		m = PHYS_TO_VM_PAGE(pa);
1718		KASSERT(m >= vm_page_array &&
1719		    m < &vm_page_array[vm_page_array_size],
1720		    ("%s: L2 page table page is out of range", __func__));
1721
1722		m->pindex = pte1_idx;
1723		m->phys_addr = pa;
1724		pte1_idx += NPT2_IN_PG;
1725	}
1726
1727	/*
1728	 * Initialize the address space (zone) for the pv entries.  Set a
1729	 * high water mark so that the system can recover from excessive
1730	 * numbers of pv entries.
1731	 */
1732	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1733	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1734	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1735	pv_entry_max = roundup(pv_entry_max, _NPCPV);
1736	pv_entry_high_water = 9 * (pv_entry_max / 10);
1737
1738	/*
1739	 * Are large page mappings enabled?
1740	 */
1741	TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
1742	if (sp_enabled) {
1743		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1744		    ("%s: can't assign to pagesizes[1]", __func__));
1745		pagesizes[1] = PTE1_SIZE;
1746	}
1747
1748	/*
1749	 * Calculate the size of the pv head table for sections.
1750	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
1751	 * Note that the table is only for sections which could be promoted.
1752	 */
1753	first_managed_pa = pte1_trunc(vm_phys_segs[0].start);
1754	pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE)
1755	    - first_managed_pa) / PTE1_SIZE + 1;
1756
1757	/*
1758	 * Allocate memory for the pv head table for sections.
1759	 */
1760	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1761	s = round_page(s);
1762	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
1763	    M_WAITOK | M_ZERO);
1764	for (i = 0; i < pv_npg; i++)
1765		TAILQ_INIT(&pv_table[i].pv_list);
1766
1767	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
1768	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
1769	if (pv_chunkbase == NULL)
1770		panic("%s: not enough kvm for pv chunks", __func__);
1771	pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
1772}
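
/*
 *  Worked example for the limits computed in pmap_init() above (purely
 *  illustrative; the tunable and page-count values below are hypothetical):
 *  with shpgperproc = 200, maxproc = 1000 and vm_cnt.v_page_count = 250000,
 *  pv_entry_max = 200 * 1000 + 250000 = 450000, rounded up to a multiple of
 *  _NPCPV (336) gives 450240, pv_entry_high_water = 9 * (450240 / 10) =
 *  405216, and pv_maxchunks = MAX(450240 / 336, maxproc) = 1340.
 */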
1773
1774/*
1775 *  Add a list of wired pages to the KVA.
1776 *  This routine is only used for temporary
1777 *  kernel mappings that do not need to have
1778 *  page modification or references recorded.
1779 *  Note that old mappings are simply written
1780 *  over.  The pages *must* be wired.
1781 *  Note: SMP coherent.  Uses a ranged shootdown IPI.
1782 */
1783void
1784pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1785{
1786	u_int anychanged;
1787	pt2_entry_t *epte2p, *pte2p, pte2;
1788	vm_page_t m;
1789	vm_paddr_t pa;
1790
1791	anychanged = 0;
1792	pte2p = pt2map_entry(sva);
1793	epte2p = pte2p + count;
1794	while (pte2p < epte2p) {
1795		m = *ma++;
1796		pa = VM_PAGE_TO_PHYS(m);
1797		pte2 = pte2_load(pte2p);
1798		if ((pte2_pa(pte2) != pa) ||
1799		    (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
1800			anychanged++;
1801			pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
1802			    vm_page_pte2_attr(m)));
1803		}
1804		pte2p++;
1805	}
1806	if (__predict_false(anychanged))
1807		tlb_flush_range(sva, count * PAGE_SIZE);
1808}
1809
1810/*
1811 *  This routine tears out page mappings from the
1812 *  kernel -- it is meant only for temporary mappings.
1813 *  Note: SMP coherent.  Uses a ranged shootdown IPI.
1814 */
1815void
1816pmap_qremove(vm_offset_t sva, int count)
1817{
1818	vm_offset_t va;
1819
1820	va = sva;
1821	while (count-- > 0) {
1822		pmap_kremove(va);
1823		va += PAGE_SIZE;
1824	}
1825	tlb_flush_range(sva, va - sva);
1826}
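
/*
 *  Illustrative sketch (not compiled in): a typical use of the two routines
 *  above for a temporary kernel mapping.  The helper name and the origin of
 *  the KVA range are hypothetical; callers usually obtain the KVA from
 *  kva_alloc() or a similar allocator, and the pages must already be wired.
 */
#if 0
static void
pmap_qenter_example(vm_offset_t kva, vm_page_t *pages, int npages)
{

	pmap_qenter(kva, pages, npages);	/* enter the wired pages */
	/* ... access the pages through kva ... */
	pmap_qremove(kva, npages);		/* tear the mappings down */
}
#endif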
1827
1828/*
1829 *  Are we current address space or kernel?
1830 */
1831static __inline int
1832pmap_is_current(pmap_t pmap)
1833{
1834
1835	return (pmap == kernel_pmap ||
1836		(pmap == vmspace_pmap(curthread->td_proc->p_vmspace)));
1837}
1838
1839/*
1840 *  If the given pmap is not the current or kernel pmap, the returned
1841 *  pte2 must be released by passing it to pmap_pte2_release().
1842 */
1843static pt2_entry_t *
1844pmap_pte2(pmap_t pmap, vm_offset_t va)
1845{
1846	pt1_entry_t pte1;
1847	vm_paddr_t pt2pg_pa;
1848
1849	pte1 = pte1_load(pmap_pte1(pmap, va));
1850	if (pte1_is_section(pte1))
1851		panic("%s: attempt to map PTE1", __func__);
1852	if (pte1_is_link(pte1)) {
1853		/* Are we current address space or kernel? */
1854		if (pmap_is_current(pmap))
1855			return (pt2map_entry(va));
1856		/* Note that L2 page table size is not equal to PAGE_SIZE. */
1857		pt2pg_pa = trunc_page(pte1_link_pa(pte1));
1858		mtx_lock(&PMAP2mutex);
1859		if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
1860			pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
1861			tlb_flush((vm_offset_t)PADDR2);
1862		}
1863		return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
1864	}
1865	return (NULL);
1866}
1867
1868/*
1869 *  Releases a pte2 that was obtained from pmap_pte2().
1870 *  Be prepared for the pte2p being NULL.
1871 */
1872static __inline void
1873pmap_pte2_release(pt2_entry_t *pte2p)
1874{
1875
1876	if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) {
1877		mtx_unlock(&PMAP2mutex);
1878	}
1879}
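
/*
 *  Illustrative sketch (not compiled in) of the lookup/release discipline
 *  for pmap_pte2().  For a pmap which is neither current nor the kernel
 *  pmap, the returned pointer goes through the PMAP2/PADDR2 window, so it
 *  must be handed back via pmap_pte2_release(), which drops PMAP2mutex.
 *  The helper name below is hypothetical; pmap_extract() later in this file
 *  is the real in-tree user of this pattern.
 */
#if 0
static vm_paddr_t
pmap_pte2_lookup_example(pmap_t pmap, vm_offset_t va)
{
	pt2_entry_t *pte2p, pte2;
	vm_paddr_t pa;

	pa = 0;
	PMAP_LOCK(pmap);
	pte2p = pmap_pte2(pmap, va);
	if (pte2p != NULL) {
		pte2 = pte2_load(pte2p);
		if (pte2_is_valid(pte2))
			pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
		pmap_pte2_release(pte2p);	/* drops PMAP2mutex if taken */
	}
	PMAP_UNLOCK(pmap);
	return (pa);
}
#endif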
1880
1881/*
1882 *  Super fast pmap_pte2 routine best used when scanning
1883 *  the pv lists.  This eliminates many coarse-grained
1884 *  invltlb calls.  Note that many of the pv list
1885 *  scans are across different pmaps.  It is very wasteful
1886 *  to do an entire tlb flush for checking a single mapping.
1887 *
1888 *  If the given pmap is not the current pmap, pvh_global_lock
1889 *  must be held and curthread pinned to a CPU.
1890 */
1891static pt2_entry_t *
1892pmap_pte2_quick(pmap_t pmap, vm_offset_t va)
1893{
1894	pt1_entry_t pte1;
1895	vm_paddr_t pt2pg_pa;
1896
1897	pte1 = pte1_load(pmap_pte1(pmap, va));
1898	if (pte1_is_section(pte1))
1899		panic("%s: attempt to map PTE1", __func__);
1900	if (pte1_is_link(pte1)) {
1901		/* Are we current address space or kernel? */
1902		if (pmap_is_current(pmap))
1903			return (pt2map_entry(va));
1904		rw_assert(&pvh_global_lock, RA_WLOCKED);
1905		KASSERT(curthread->td_pinned > 0,
1906		    ("%s: curthread not pinned", __func__));
1907		/* Note that L2 page table size is not equal to PAGE_SIZE. */
1908		pt2pg_pa = trunc_page(pte1_link_pa(pte1));
1909		if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) {
1910			pte2_store(PMAP1, PTE2_KPT(pt2pg_pa));
1911#ifdef SMP
1912			PMAP1cpu = PCPU_GET(cpuid);
1913#endif
1914			tlb_flush_local((vm_offset_t)PADDR1);
1915			PMAP1changed++;
1916		} else
1917#ifdef SMP
1918		if (PMAP1cpu != PCPU_GET(cpuid)) {
1919			PMAP1cpu = PCPU_GET(cpuid);
1920			tlb_flush_local((vm_offset_t)PADDR1);
1921			PMAP1changedcpu++;
1922		} else
1923#endif
1924			PMAP1unchanged++;
1925		return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
1926	}
1927	return (NULL);
1928}
1929
1930/*
1931 *  Routine: pmap_extract
1932 *  Function:
1933 * 	Extract the physical page address associated
1934 *	with the given map/virtual_address pair.
1935 */
1936vm_paddr_t
1937pmap_extract(pmap_t pmap, vm_offset_t va)
1938{
1939	vm_paddr_t pa;
1940	pt1_entry_t pte1;
1941	pt2_entry_t *pte2p;
1942
1943	PMAP_LOCK(pmap);
1944	pte1 = pte1_load(pmap_pte1(pmap, va));
1945	if (pte1_is_section(pte1))
1946		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1947	else if (pte1_is_link(pte1)) {
1948		pte2p = pmap_pte2(pmap, va);
1949		pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET);
1950		pmap_pte2_release(pte2p);
1951	} else
1952		pa = 0;
1953	PMAP_UNLOCK(pmap);
1954	return (pa);
1955}
1956
1957/*
1958 *  Routine: pmap_extract_and_hold
1959 *  Function:
1960 *	Atomically extract and hold the physical page
1961 *	with the given pmap and virtual address pair
1962 *	if that mapping permits the given protection.
1963 */
1964vm_page_t
1965pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1966{
1967	vm_paddr_t pa, lockpa;
1968	pt1_entry_t pte1;
1969	pt2_entry_t pte2, *pte2p;
1970	vm_page_t m;
1971
1972	lockpa = 0;
1973	m = NULL;
1974	PMAP_LOCK(pmap);
1975retry:
1976	pte1 = pte1_load(pmap_pte1(pmap, va));
1977	if (pte1_is_section(pte1)) {
1978		if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) {
1979			pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1980			if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
1981				goto retry;
1982			m = PHYS_TO_VM_PAGE(pa);
1983			vm_page_hold(m);
1984		}
1985	} else if (pte1_is_link(pte1)) {
1986		pte2p = pmap_pte2(pmap, va);
1987		pte2 = pte2_load(pte2p);
1988		pmap_pte2_release(pte2p);
1989		if (pte2_is_valid(pte2) &&
1990		    (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) {
1991			pa = pte2_pa(pte2);
1992			if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
1993				goto retry;
1994			m = PHYS_TO_VM_PAGE(pa);
1995			vm_page_hold(m);
1996		}
1997	}
1998	PA_UNLOCK_COND(lockpa);
1999	PMAP_UNLOCK(pmap);
2000	return (m);
2001}
2002
2003/*
2004 *  Grow the number of kernel L2 page table entries, if needed.
2005 */
2006void
2007pmap_growkernel(vm_offset_t addr)
2008{
2009	vm_page_t m;
2010	vm_paddr_t pt2pg_pa, pt2_pa;
2011	pt1_entry_t pte1;
2012	pt2_entry_t pte2;
2013
2014	PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr));
2015	/*
2016	 * At all times, kernel_vm_end is the first KVA for which the underlying
2017	 * L2 page table is either not allocated or not linked from the L1 page
2018	 * table (not considering sections), except for two possible cases:
2019	 *
2020	 *   (1) at the very beginning, before pmap_growkernel() has ever been
2021	 *       called, it may be the first unused KVA (which is not
2022	 *       rounded up to PTE1_SIZE),
2023	 *
2024	 *   (2) when all KVA space is mapped and the vm_map_max(kernel_map)
2025	 *       address is not rounded up to PTE1_SIZE.  (For example,
2026	 *       it could be 0xFFFFFFFF.)
2027	 */
2028	kernel_vm_end = pte1_roundup(kernel_vm_end);
2029	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2030	addr = roundup2(addr, PTE1_SIZE);
2031	if (addr - 1 >= vm_map_max(kernel_map))
2032		addr = vm_map_max(kernel_map);
2033	while (kernel_vm_end < addr) {
2034		pte1 = pte1_load(kern_pte1(kernel_vm_end));
2035		if (pte1_is_valid(pte1)) {
2036			kernel_vm_end += PTE1_SIZE;
2037			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2038				kernel_vm_end = vm_map_max(kernel_map);
2039				break;
2040			}
2041			continue;
2042		}
2043
2044		/*
2045		 * kernel_vm_end_new is used in pmap_pinit() when kernel
2046		 * mappings are entered into a new pmap all at once, to avoid a
2047		 * race between pmap_kenter_pte1() and the increase of
2048		 * kernel_vm_end.  The same applies to pmap_kenter_pt2tab().
2049		 */
2050		kernel_vm_end_new = kernel_vm_end + PTE1_SIZE;
2051
2052		pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end));
2053		if (!pte2_is_valid(pte2)) {
2054			/*
2055			 * Install new PT2s page into kernel PT2TAB.
2056			 */
2057			m = vm_page_alloc(NULL,
2058			    pte1_index(kernel_vm_end) & ~PT2PG_MASK,
2059			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2060			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2061			if (m == NULL)
2062				panic("%s: no memory to grow kernel", __func__);
2063			/*
2064			 * QQQ: Linking all new L2 page tables from the L1 page
2065			 *      table now, and thus pmap_kenter_pte1()ing them
2066			 *      at once together with pmap_kenter_pt2tab(),
2067			 *      could be a nice speedup.  However,
2068			 *      pmap_growkernel() does not happen very often...
2069			 * QQQ: The other TTBR is another option.
2070			 */
2071			pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end,
2072			    m);
2073		} else
2074			pt2pg_pa = pte2_pa(pte2);
2075
2076		pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end));
2077		pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa));
2078
2079		kernel_vm_end = kernel_vm_end_new;
2080		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2081			kernel_vm_end = vm_map_max(kernel_map);
2082			break;
2083		}
2084	}
2085}
2086
2087static int
2088kvm_size(SYSCTL_HANDLER_ARGS)
2089{
2090	unsigned long ksize = vm_max_kernel_address - KERNBASE;
2091
2092	return (sysctl_handle_long(oidp, &ksize, 0, req));
2093}
2094SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
2095    0, 0, kvm_size, "IU", "Size of KVM");
2096
2097static int
2098kvm_free(SYSCTL_HANDLER_ARGS)
2099{
2100	unsigned long kfree = vm_max_kernel_address - kernel_vm_end;
2101
2102	return (sysctl_handle_long(oidp, &kfree, 0, req));
2103}
2104SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
2105    0, 0, kvm_free, "IU", "Amount of KVM free");
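
/*
 *  The two handlers above show up under the "vm" sysctl tree, so the values
 *  can be read from userland, e.g. (illustrative):
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */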
2106
2107/***********************************************
2108 *
2109 *  Pmap allocation/deallocation routines.
2110 *
2111 ***********************************************/
2112
2113/*
2114 *  Initialize the pmap for the swapper process.
2115 */
2116void
2117pmap_pinit0(pmap_t pmap)
2118{
2119	PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap));
2120
2121	PMAP_LOCK_INIT(pmap);
2122
2123	/*
2124	 * The kernel page table directory and the surrounding pmap structures
2125	 * are already initialized and we are using them right now and here.
2126	 * So, finish only the PMAP structure initialization for process0 ...
2127	 *
2128	 * Since the L1 page table and PT2TAB are shared with the kernel pmap,
2129	 * which is already included in the list "allpmaps", this pmap does
2130	 * not need to be inserted into that list.
2131	 */
2132	pmap->pm_pt1 = kern_pt1;
2133	pmap->pm_pt2tab = kern_pt2tab;
2134	CPU_ZERO(&pmap->pm_active);
2135	PCPU_SET(curpmap, pmap);
2136	TAILQ_INIT(&pmap->pm_pvchunk);
2137	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2138	CPU_SET(0, &pmap->pm_active);
2139}
2140
2141static __inline void
2142pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva,
2143    vm_offset_t eva)
2144{
2145	u_int idx, count;
2146
2147	idx = pte1_index(sva);
2148	count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t);
2149	bcopy(spte1p + idx, dpte1p + idx, count);
2150}
2151
2152static __inline void
2153pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva,
2154    vm_offset_t eva)
2155{
2156	u_int idx, count;
2157
2158	idx = pt2tab_index(sva);
2159	count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t);
2160	bcopy(spte2p + idx, dpte2p + idx, count);
2161}
2162
2163/*
2164 *  Initialize a preallocated and zeroed pmap structure,
2165 *  such as one in a vmspace structure.
2166 */
2167int
2168pmap_pinit(pmap_t pmap)
2169{
2170	pt1_entry_t *pte1p;
2171	pt2_entry_t *pte2p;
2172	vm_paddr_t pa, pt2tab_pa;
2173	u_int i;
2174
2175	PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap,
2176	    pmap->pm_pt1));
2177
2178	/*
2179	 * No need to allocate L2 page table space yet but we do need
2180	 * a valid L1 page table and PT2TAB table.
2181	 *
2182	 * Install shared kernel mappings to these tables. It's a little
2183	 * tricky as some parts of KVA are reserved for vectors, devices,
2184	 * and whatever else. These parts are supposed to be above
2185	 * vm_max_kernel_address. Thus two regions should be installed:
2186	 *
2187	 *   (1) <KERNBASE, kernel_vm_end),
2188	 *   (2) <vm_max_kernel_address, 0xFFFFFFFF>.
2189	 *
2190	 * QQQ: The second region should be stable enough to be installed
2191	 *      only once, at the time the tables are allocated.
2192	 * QQQ: Maybe copying both regions at once could be faster ...
2193	 * QQQ: Maybe the other TTBR is an option.
2194	 *
2195	 * Finally, install own PT2TAB table to these tables.
2196	 */
2197
2198	if (pmap->pm_pt1 == NULL) {
2199		pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(kernel_arena,
2200		    NB_IN_PT1, M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0,
2201		    pt_memattr);
2202		if (pmap->pm_pt1 == NULL)
2203			return (0);
2204	}
2205	if (pmap->pm_pt2tab == NULL) {
2206		/*
2207		 * QQQ: (1) PT2TAB must be contiguous.  If PT2TAB is only one
2208		 *      page, which should be the only size on 32-bit systems,
2209		 *      then we could allocate it with vm_page_alloc() and all
2210		 *      the other machinery used for L2 page table pages.
2211		 *      (2) Note that a process PT2TAB is a special L2 page table
2212		 *      page.  Its mapping in kernel_arena is permanent and can
2213		 *      be used no matter which process is current.  Its mapping
2214		 *      in PT2MAP can be used only for the current process.
2215		 */
2216		pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(kernel_arena,
2217		    NB_IN_PT2TAB, M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr);
2218		if (pmap->pm_pt2tab == NULL) {
2219			/*
2220			 * QQQ: As struct pmap is allocated from UMA with
2221			 *      UMA_ZONE_NOFREE flag, it's important to leave
2222			 *      no allocation in pmap if initialization failed.
2223			 */
2224			kmem_free(kernel_arena, (vm_offset_t)pmap->pm_pt1,
2225			    NB_IN_PT1);
2226			pmap->pm_pt1 = NULL;
2227			return (0);
2228		}
2229		/*
2230		 * QQQ: Each L2 page table page vm_page_t has pindex set to
2231		 *      the pte1 index of the virtual address mapped by that page.
2232		 *      This is not valid for non-kernel PT2TABs themselves.
2233		 *      The pindex of these pages cannot be altered because
2234		 *      of the way they are allocated now.  However, it
2235		 *      should not be a problem.
2236		 */
2237	}
2238
2239	mtx_lock_spin(&allpmaps_lock);
2240	/*
2241	 * To avoid race with pmap_kenter_pte1() and pmap_kenter_pt2tab(),
2242	 * kernel_vm_end_new is used here instead of kernel_vm_end.
2243	 */
2244	pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE,
2245	    kernel_vm_end_new - 1);
2246	pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address,
2247	    0xFFFFFFFF);
2248	pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE,
2249	    kernel_vm_end_new - 1);
2250	pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address,
2251	    0xFFFFFFFF);
2252	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
2253	mtx_unlock_spin(&allpmaps_lock);
2254
2255	/*
2256	 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself.
2257	 * I.e. a self-referencing mapping.  The PT2TAB is private, but it is
2258	 * mapped into the shared PT2MAP space, so the mapping should not be global.
2259	 */
2260	pt2tab_pa = vtophys(pmap->pm_pt2tab);
2261	pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP);
2262	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
2263		pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
2264	}
2265
2266	/* Insert PT2MAP PT2s into pmap PT1. */
2267	pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP);
2268	for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
2269		pte1_store(pte1p++, PTE1_LINK(pa));
2270	}
2271
2272	/*
2273	 * Now synchronize new mapping which was made above.
2274	 */
2275	pte1_sync_range(pmap->pm_pt1, NB_IN_PT1);
2276	pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB);
2277
2278	CPU_ZERO(&pmap->pm_active);
2279	TAILQ_INIT(&pmap->pm_pvchunk);
2280	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2281
2282	return (1);
2283}
2284
2285#ifdef INVARIANTS
2286static boolean_t
2287pt2tab_user_is_empty(pt2_entry_t *tab)
2288{
2289	u_int i, end;
2290
2291	end = pt2tab_index(VM_MAXUSER_ADDRESS);
2292	for (i = 0; i < end; i++)
2293		if (tab[i] != 0) return (FALSE);
2294	return (TRUE);
2295}
2296#endif
2297/*
2298 *  Release any resources held by the given physical map.
2299 *  Called when a pmap initialized by pmap_pinit is being released.
2300 *  Should only be called if the map contains no valid mappings.
2301 */
2302void
2303pmap_release(pmap_t pmap)
2304{
2305#ifdef INVARIANTS
2306	vm_offset_t start, end;
2307#endif
2308	KASSERT(pmap->pm_stats.resident_count == 0,
2309	    ("%s: pmap resident count %ld != 0", __func__,
2310	    pmap->pm_stats.resident_count));
2311	KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab),
2312	    ("%s: has allocated user PT2(s)", __func__));
2313	KASSERT(CPU_EMPTY(&pmap->pm_active),
2314	    ("%s: pmap %p is active on some CPU(s)", __func__, pmap));
2315
2316	mtx_lock_spin(&allpmaps_lock);
2317	LIST_REMOVE(pmap, pm_list);
2318	mtx_unlock_spin(&allpmaps_lock);
2319
2320#ifdef INVARIANTS
2321	start = pte1_index(KERNBASE) * sizeof(pt1_entry_t);
2322	end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t);
2323	bzero((char *)pmap->pm_pt1 + start, end - start);
2324
2325	start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t);
2326	end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t);
2327	bzero((char *)pmap->pm_pt2tab + start, end - start);
2328#endif
2329	/*
2330	 * We are leaving PT1 and PT2TAB allocated on the released pmap,
2331	 * so hopefully the UMA vmspace_zone will always be initialized with
2332	 * the UMA_ZONE_NOFREE flag.
2333	 */
2334}
2335
2336/*********************************************************
2337 *
2338 *  L2 table pages and their pages management routines.
2339 *
2340 *********************************************************/
2341
2342/*
2343 *  Virtual interface for L2 page table wire counting.
2344 *
2345 *  Each L2 page table in a page has its own counter which counts the number
2346 *  of valid mappings in that table.  The global page counter counts the
2347 *  mappings in all tables in the page plus the page's own mapping in PT2TAB.
2348 *
2349 *  During a promotion we leave the associated L2 page table counter
2350 *  untouched, so the table (strictly speaking, the page which holds it)
2351 *  is never freed as a result of a promotion.
2352 *
2353 *  If a page m->wire_count == 1 then no valid mappings exist in any L2 page
2354 *  table in the page and the page itself is only mapped in PT2TAB.
2355 */
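
/*
 *  Illustrative sketch (not compiled in): the steady-state invariant implied
 *  by the description above, written as a check.  The helper is hypothetical
 *  and ignores the transient extra reference mentioned in pt2_wirecount_inc()
 *  below.
 */
#if 0
static void
pt2pg_wirecount_check(vm_page_t m)
{
	uint32_t i, sum;

	sum = 0;
	for (i = 0; i < NPT2_IN_PG; i++)
		sum += m->md.pt2_wirecount[i];
	/* The single extra reference is the page's own mapping in PT2TAB. */
	KASSERT(m->wire_count == sum + 1, ("PT2PG wire count mismatch"));
}
#endif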
2356
2357static __inline void
2358pt2_wirecount_init(vm_page_t m)
2359{
2360	u_int i;
2361
2362	/*
2363	 * Note: A page m is allocated with VM_ALLOC_WIRED flag and
2364	 *       m->wire_count should be already set correctly.
2365	 *       So, there is no need to set it again herein.
2366	 */
2367	for (i = 0; i < NPT2_IN_PG; i++)
2368		m->md.pt2_wirecount[i] = 0;
2369}
2370
2371static __inline void
2372pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx)
2373{
2374
2375	/*
2376	 * Note: A just modified pte2 (i.e. already allocated)
2377	 *       acquires one extra reference which must be
2378	 *       explicitly cleared.  It influences the KASSERTs herein.
2379	 *       All L2 page tables in a page always belong to the same
2380	 *       pmap, so we allow only one extra reference for the page.
2381	 */
2382	KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1),
2383	    ("%s: PT2 is overflowing ...", __func__));
2384	KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
2385	    ("%s: PT2PG is overflowing ...", __func__));
2386
2387	m->wire_count++;
2388	m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++;
2389}
2390
2391static __inline void
2392pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx)
2393{
2394
2395	KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0,
2396	    ("%s: PT2 is underflowing ...", __func__));
2397	KASSERT(m->wire_count > 1,
2398	    ("%s: PT2PG is underflowing ...", __func__));
2399
2400	m->wire_count--;
2401	m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--;
2402}
2403
2404static __inline void
2405pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count)
2406{
2407
2408	KASSERT(count <= NPTE2_IN_PT2,
2409	    ("%s: invalid count %u", __func__, count));
2410	KASSERT(m->wire_count >  m->md.pt2_wirecount[pte1_idx & PT2PG_MASK],
2411	    ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->wire_count,
2412	    m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]));
2413
2414	m->wire_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK];
2415	m->wire_count += count;
2416	m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count;
2417
2418	KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
2419	    ("%s: PT2PG is overflowed (%u) ...", __func__, m->wire_count));
2420}
2421
2422static __inline uint32_t
2423pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx)
2424{
2425
2426	return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]);
2427}
2428
2429static __inline boolean_t
2430pt2_is_empty(vm_page_t m, vm_offset_t va)
2431{
2432
2433	return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0);
2434}
2435
2436static __inline boolean_t
2437pt2_is_full(vm_page_t m, vm_offset_t va)
2438{
2439
2440	return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] ==
2441	    NPTE2_IN_PT2);
2442}
2443
2444static __inline boolean_t
2445pt2pg_is_empty(vm_page_t m)
2446{
2447
2448	return (m->wire_count == 1);
2449}
2450
2451/*
2452 *  This routine is called if the L2 page table
2453 *  is not mapped correctly.
2454 */
2455static vm_page_t
2456_pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
2457{
2458	uint32_t pte1_idx;
2459	pt1_entry_t *pte1p;
2460	pt2_entry_t pte2;
2461	vm_page_t  m;
2462	vm_paddr_t pt2pg_pa, pt2_pa;
2463
2464	pte1_idx = pte1_index(va);
2465	pte1p = pmap->pm_pt1 + pte1_idx;
2466
2467	KASSERT(pte1_load(pte1p) == 0,
2468	    ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx,
2469	    pte1_load(pte1p)));
2470
2471	pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va));
2472	if (!pte2_is_valid(pte2)) {
2473		/*
2474		 * Install new PT2s page into pmap PT2TAB.
2475		 */
2476		m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK,
2477		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2478		if (m == NULL) {
2479			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
2480				PMAP_UNLOCK(pmap);
2481				rw_wunlock(&pvh_global_lock);
2482				VM_WAIT;
2483				rw_wlock(&pvh_global_lock);
2484				PMAP_LOCK(pmap);
2485			}
2486
2487			/*
2488			 * Indicate the need to retry.  While waiting,
2489			 * the L2 page table page may have been allocated.
2490			 */
2491			return (NULL);
2492		}
2493		pmap->pm_stats.resident_count++;
2494		pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
2495	} else {
2496		pt2pg_pa = pte2_pa(pte2);
2497		m = PHYS_TO_VM_PAGE(pt2pg_pa);
2498	}
2499
2500	pt2_wirecount_inc(m, pte1_idx);
2501	pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx);
2502	pte1_store(pte1p, PTE1_LINK(pt2_pa));
2503
2504	return (m);
2505}
2506
2507static vm_page_t
2508pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
2509{
2510	u_int pte1_idx;
2511	pt1_entry_t *pte1p, pte1;
2512	vm_page_t m;
2513
2514	pte1_idx = pte1_index(va);
2515retry:
2516	pte1p = pmap->pm_pt1 + pte1_idx;
2517	pte1 = pte1_load(pte1p);
2518
2519	/*
2520	 * This supports switching from a 1MB page to a
2521	 * normal 4K page.
2522	 */
2523	if (pte1_is_section(pte1)) {
2524		(void)pmap_demote_pte1(pmap, pte1p, va);
2525		/*
2526		 * Reload pte1 after demotion.
2527		 *
2528		 * Note: Demotion can even fail if either no PT2 is found for
2529		 *       the virtual address or a PT2PG cannot be allocated.
2530		 */
2531		pte1 = pte1_load(pte1p);
2532	}
2533
2534	/*
2535	 * If the L2 page table page is mapped, we just increment the
2536	 * hold count, and activate it.
2537	 */
2538	if (pte1_is_link(pte1)) {
2539		m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
2540		pt2_wirecount_inc(m, pte1_idx);
2541	} else  {
2542		/*
2543		 * Here if the PT2 isn't mapped, or if it has
2544		 * been deallocated.
2545		 */
2546		m = _pmap_allocpte2(pmap, va, flags);
2547		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
2548			goto retry;
2549	}
2550
2551	return (m);
2552}
2553
2554static __inline void
2555pmap_free_zero_pages(struct spglist *free)
2556{
2557	vm_page_t m;
2558
2559	while ((m = SLIST_FIRST(free)) != NULL) {
2560		SLIST_REMOVE_HEAD(free, plinks.s.ss);
2561		/* Preserve the page's PG_ZERO setting. */
2562		vm_page_free_toq(m);
2563	}
2564}
2565
2566/*
2567 *  Schedule the specified unused L2 page table page to be freed. Specifically,
2568 *  add the page to the specified list of pages that will be released to the
2569 *  physical memory manager after the TLB has been updated.
2570 */
2571static __inline void
2572pmap_add_delayed_free_list(vm_page_t m, struct spglist *free)
2573{
2574
2575	/*
2576	 * Put the page on a list so that it is released only after
2577	 * *ALL* TLB shootdowns are done.
2578	 */
2579#ifdef PMAP_DEBUG
2580	pmap_zero_page_check(m);
2581#endif
2582	m->flags |= PG_ZERO;
2583	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2584}
2585
2586/*
2587 *  Unwire L2 page tables page.
2588 */
2589static void
2590pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
2591{
2592	pt1_entry_t *pte1p, opte1 __unused;
2593	pt2_entry_t *pte2p;
2594	uint32_t i;
2595
2596	KASSERT(pt2pg_is_empty(m),
2597	    ("%s: pmap %p PT2PG %p wired", __func__, pmap, m));
2598
2599	/*
2600	 * Unmap all L2 page tables in the page from L1 page table.
2601	 *
2602	 * QQQ: Individual L2 page tables (except the last one) could be unmapped
2603	 * earlier.  However, we do it this way for now.
2604	 */
2605	KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK),
2606	    ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m));
2607	pte1p = pmap->pm_pt1 + m->pindex;
2608	for (i = 0; i < NPT2_IN_PG; i++, pte1p++) {
2609		KASSERT(m->md.pt2_wirecount[i] == 0,
2610		    ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m));
2611		opte1 = pte1_load(pte1p);
2612		if (pte1_is_link(opte1)) {
2613			pte1_clear(pte1p);
2614			/*
2615			 * Flush intermediate TLB cache.
2616			 */
2617			pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT);
2618		}
2619#ifdef INVARIANTS
2620		else
2621			KASSERT((opte1 == 0) || pte1_is_section(opte1),
2622			    ("%s: pmap %p va %#x bad pte1 %x at %u", __func__,
2623			    pmap, va, opte1, i));
2624#endif
2625	}
2626
2627	/*
2628	 * Unmap the page from PT2TAB.
2629	 */
2630	pte2p = pmap_pt2tab_entry(pmap, va);
2631	(void)pt2tab_load_clear(pte2p);
2632	pmap_tlb_flush(pmap, pt2map_pt2pg(va));
2633
2634	m->wire_count = 0;
2635	pmap->pm_stats.resident_count--;
2636
2637	/*
2638	 * This is a release store so that the ordinary store unmapping
2639	 * the L2 page table page is globally performed before TLB shoot-
2640	 * down is begun.
2641	 */
2642	atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
2643}
2644
2645/*
2646 *  Decrements an L2 page table page's wire count, which is used to record the
2647 *  number of valid page table entries within the page.  If the wire count
2648 *  drops to zero, then the page table page is unmapped.  Returns TRUE if the
2649 *  page table page was unmapped and FALSE otherwise.
2650 */
2651static __inline boolean_t
2652pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2653{
2654	pt2_wirecount_dec(m, pte1_index(va));
2655	if (pt2pg_is_empty(m)) {
2656		/*
2657		 * QQQ: The wire count is zero, so the whole page should be zero
2658		 *      and we can set the PG_ZERO flag on it.
2659		 *      Note that when promotion is enabled, it takes some
2660		 *      more effort.  See pmap_unwire_pt2_all() below.
2661		 */
2662		pmap_unwire_pt2pg(pmap, va, m);
2663		pmap_add_delayed_free_list(m, free);
2664		return (TRUE);
2665	} else
2666		return (FALSE);
2667}
2668
2669/*
2670 *  Drop an L2 page table page's wire count at once, which is used to record
2671 *  the number of valid L2 page table entries within the page. If the wire
2672 *  count drops to zero, then the L2 page table page is unmapped.
2673 */
2674static __inline void
2675pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m,
2676    struct spglist *free)
2677{
2678	u_int pte1_idx = pte1_index(va);
2679
2680	KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK),
2681		("%s: PT2 page's pindex is wrong", __func__));
2682	KASSERT(m->wire_count > pt2_wirecount_get(m, pte1_idx),
2683	    ("%s: bad pt2 wire count %u > %u", __func__, m->wire_count,
2684	    pt2_wirecount_get(m, pte1_idx)));
2685
2686	/*
2687	 * It's possible that the L2 page table was never used.
2688	 * This happens when a section was created without promotion.
2689	 */
2690	if (pt2_is_full(m, va)) {
2691		pt2_wirecount_set(m, pte1_idx, 0);
2692
2693		/*
2694		 * QQQ: We clear the L2 page table now, so when the L2 page table
2695		 *      page is going to be freed, we can set the PG_ZERO flag on it...
2696		 *      This function is called only on section mappings, so
2697		 *      hopefully it is not too big an overhead.
2698		 *
2699		 * XXX: If pmap is current, existing PT2MAP mapping could be
2700		 *      used for zeroing.
2701		 */
2702		pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2);
2703	}
2704#ifdef INVARIANTS
2705	else
2706		KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)",
2707		    __func__, pt2_wirecount_get(m, pte1_idx)));
2708#endif
2709	if (pt2pg_is_empty(m)) {
2710		pmap_unwire_pt2pg(pmap, va, m);
2711		pmap_add_delayed_free_list(m, free);
2712	}
2713}
2714
2715/*
2716 *  After removing an L2 page table entry, this routine is used to
2717 *  conditionally free the page, and manage the hold/wire counts.
2718 */
2719static boolean_t
2720pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free)
2721{
2722	pt1_entry_t pte1;
2723	vm_page_t mpte;
2724
2725	if (va >= VM_MAXUSER_ADDRESS)
2726		return (FALSE);
2727	pte1 = pte1_load(pmap_pte1(pmap, va));
2728	mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
2729	return (pmap_unwire_pt2(pmap, va, mpte, free));
2730}
2731
2732/*************************************
2733 *
2734 *  Page management routines.
2735 *
2736 *************************************/
2737
2738CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2739CTASSERT(_NPCM == 11);
2740CTASSERT(_NPCPV == 336);
2741
2742static __inline struct pv_chunk *
2743pv_to_chunk(pv_entry_t pv)
2744{
2745
2746	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2747}
2748
2749#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2750
2751#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
2752#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
2753
2754static const uint32_t pc_freemask[_NPCM] = {
2755	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2756	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2757	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2758	PC_FREE0_9, PC_FREE10
2759};
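
/*
 *  The freemask layout follows from the constants asserted above:
 *  _NPCPV (336) pv entries per chunk need ceil(336 / 32) = 11 bitmap words
 *  (_NPCM); words 0 through 9 cover 10 * 32 = 320 entries and are fully
 *  usable (PC_FREE0_9 = 0xffffffff), while word 10 covers only the remaining
 *  336 - 320 = 16 entries, hence PC_FREE10 = 0x0000ffff.
 */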
2760
2761SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2762	"Current number of pv entries");
2763
2764#ifdef PV_STATS
2765static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2766
2767SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2768    "Current number of pv entry chunks");
2769SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2770    "Current number of pv entry chunks allocated");
2771SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2772    "Current number of pv entry chunks frees");
2773SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail,
2774    0, "Number of times tried to get a chunk page but failed.");
2775
2776static long pv_entry_frees, pv_entry_allocs;
2777static int pv_entry_spare;
2778
2779SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2780    "Current number of pv entry frees");
2781SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs,
2782    0, "Current number of pv entry allocs");
2783SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2784    "Current number of spare pv entries");
2785#endif
2786
2787/*
2788 *  Is the given page managed?
2789 */
2790static __inline bool
2791is_managed(vm_paddr_t pa)
2792{
2793	vm_page_t m;
2794
2795	m = PHYS_TO_VM_PAGE(pa);
2796	if (m == NULL)
2797		return (false);
2798	return ((m->oflags & VPO_UNMANAGED) == 0);
2799}
2800
2801static __inline bool
2802pte1_is_managed(pt1_entry_t pte1)
2803{
2804
2805	return (is_managed(pte1_pa(pte1)));
2806}
2807
2808static __inline bool
2809pte2_is_managed(pt2_entry_t pte2)
2810{
2811
2812	return (is_managed(pte2_pa(pte2)));
2813}
2814
2815/*
2816 *  We are in a serious low memory condition.  Resort to
2817 *  drastic measures to free some pages so we can allocate
2818 *  another pv entry chunk.
2819 */
2820static vm_page_t
2821pmap_pv_reclaim(pmap_t locked_pmap)
2822{
2823	struct pch newtail;
2824	struct pv_chunk *pc;
2825	struct md_page *pvh;
2826	pt1_entry_t *pte1p;
2827	pmap_t pmap;
2828	pt2_entry_t *pte2p, tpte2;
2829	pv_entry_t pv;
2830	vm_offset_t va;
2831	vm_page_t m, m_pc;
2832	struct spglist free;
2833	uint32_t inuse;
2834	int bit, field, freed;
2835
2836	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2837	pmap = NULL;
2838	m_pc = NULL;
2839	SLIST_INIT(&free);
2840	TAILQ_INIT(&newtail);
2841	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
2842	    SLIST_EMPTY(&free))) {
2843		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2844		if (pmap != pc->pc_pmap) {
2845			if (pmap != NULL) {
2846				if (pmap != locked_pmap)
2847					PMAP_UNLOCK(pmap);
2848			}
2849			pmap = pc->pc_pmap;
2850			/* Avoid deadlock and lock recursion. */
2851			if (pmap > locked_pmap)
2852				PMAP_LOCK(pmap);
2853			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2854				pmap = NULL;
2855				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2856				continue;
2857			}
2858		}
2859
2860		/*
2861		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2862		 */
2863		freed = 0;
2864		for (field = 0; field < _NPCM; field++) {
2865			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2866			    inuse != 0; inuse &= ~(1UL << bit)) {
2867				bit = ffs(inuse) - 1;
2868				pv = &pc->pc_pventry[field * 32 + bit];
2869				va = pv->pv_va;
2870				pte1p = pmap_pte1(pmap, va);
2871				if (pte1_is_section(pte1_load(pte1p)))
2872					continue;
2873				pte2p = pmap_pte2(pmap, va);
2874				tpte2 = pte2_load(pte2p);
2875				if ((tpte2 & PTE2_W) == 0)
2876					tpte2 = pte2_load_clear(pte2p);
2877				pmap_pte2_release(pte2p);
2878				if ((tpte2 & PTE2_W) != 0)
2879					continue;
2880				KASSERT(tpte2 != 0,
2881				    ("pmap_pv_reclaim: pmap %p va %#x zero pte",
2882				    pmap, va));
2883				pmap_tlb_flush(pmap, va);
2884				m = PHYS_TO_VM_PAGE(pte2_pa(tpte2));
2885				if (pte2_is_dirty(tpte2))
2886					vm_page_dirty(m);
2887				if ((tpte2 & PTE2_A) != 0)
2888					vm_page_aflag_set(m, PGA_REFERENCED);
2889				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2890				if (TAILQ_EMPTY(&m->md.pv_list) &&
2891				    (m->flags & PG_FICTITIOUS) == 0) {
2892					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2893					if (TAILQ_EMPTY(&pvh->pv_list)) {
2894						vm_page_aflag_clear(m,
2895						    PGA_WRITEABLE);
2896					}
2897				}
2898				pc->pc_map[field] |= 1UL << bit;
2899				pmap_unuse_pt2(pmap, va, &free);
2900				freed++;
2901			}
2902		}
2903		if (freed == 0) {
2904			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2905			continue;
2906		}
2907		/* Every freed mapping is for a 4 KB page. */
2908		pmap->pm_stats.resident_count -= freed;
2909		PV_STAT(pv_entry_frees += freed);
2910		PV_STAT(pv_entry_spare += freed);
2911		pv_entry_count -= freed;
2912		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2913		for (field = 0; field < _NPCM; field++)
2914			if (pc->pc_map[field] != pc_freemask[field]) {
2915				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2916				    pc_list);
2917				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2918
2919				/*
2920				 * One freed pv entry in locked_pmap is
2921				 * sufficient.
2922				 */
2923				if (pmap == locked_pmap)
2924					goto out;
2925				break;
2926			}
2927		if (field == _NPCM) {
2928			PV_STAT(pv_entry_spare -= _NPCPV);
2929			PV_STAT(pc_chunk_count--);
2930			PV_STAT(pc_chunk_frees++);
2931			/* Entire chunk is free; return it. */
2932			m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2933			pmap_qremove((vm_offset_t)pc, 1);
2934			pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc);
2935			break;
2936		}
2937	}
2938out:
2939	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
2940	if (pmap != NULL) {
2941		if (pmap != locked_pmap)
2942			PMAP_UNLOCK(pmap);
2943	}
2944	if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
2945		m_pc = SLIST_FIRST(&free);
2946		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2947		/* Recycle a freed page table page. */
2948		m_pc->wire_count = 1;
2949		atomic_add_int(&vm_cnt.v_wire_count, 1);
2950	}
2951	pmap_free_zero_pages(&free);
2952	return (m_pc);
2953}
2954
2955static void
2956free_pv_chunk(struct pv_chunk *pc)
2957{
2958	vm_page_t m;
2959
2960	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2961	PV_STAT(pv_entry_spare -= _NPCPV);
2962	PV_STAT(pc_chunk_count--);
2963	PV_STAT(pc_chunk_frees++);
2964	/* entire chunk is free, return it */
2965	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2966	pmap_qremove((vm_offset_t)pc, 1);
2967	vm_page_unwire(m, PQ_NONE);
2968	vm_page_free(m);
2969	pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc);
2970}
2971
2972/*
2973 *  Free the pv_entry back to the free list.
2974 */
2975static void
2976free_pv_entry(pmap_t pmap, pv_entry_t pv)
2977{
2978	struct pv_chunk *pc;
2979	int idx, field, bit;
2980
2981	rw_assert(&pvh_global_lock, RA_WLOCKED);
2982	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2983	PV_STAT(pv_entry_frees++);
2984	PV_STAT(pv_entry_spare++);
2985	pv_entry_count--;
2986	pc = pv_to_chunk(pv);
2987	idx = pv - &pc->pc_pventry[0];
2988	field = idx / 32;
2989	bit = idx % 32;
2990	pc->pc_map[field] |= 1ul << bit;
2991	for (idx = 0; idx < _NPCM; idx++)
2992		if (pc->pc_map[idx] != pc_freemask[idx]) {
2993			/*
2994			 * 98% of the time, pc is already at the head of the
2995			 * list.  If it isn't already, move it to the head.
2996			 */
2997			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2998			    pc)) {
2999				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3000				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
3001				    pc_list);
3002			}
3003			return;
3004		}
3005	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3006	free_pv_chunk(pc);
3007}
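
/*
 *  Worked example for the index arithmetic in free_pv_entry() above (purely
 *  illustrative): for a pv entry at index 100 within pc_pventry[], field =
 *  100 / 32 = 3 and bit = 100 % 32 = 4, so freeing it sets bit 4 of
 *  pc_map[3].
 */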
3008
3009/*
3010 *  Get a new pv_entry, allocating a block from the system
3011 *  when needed.
3012 */
3013static pv_entry_t
3014get_pv_entry(pmap_t pmap, boolean_t try)
3015{
3016	static const struct timeval printinterval = { 60, 0 };
3017	static struct timeval lastprint;
3018	int bit, field;
3019	pv_entry_t pv;
3020	struct pv_chunk *pc;
3021	vm_page_t m;
3022
3023	rw_assert(&pvh_global_lock, RA_WLOCKED);
3024	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3025	PV_STAT(pv_entry_allocs++);
3026	pv_entry_count++;
3027	if (pv_entry_count > pv_entry_high_water)
3028		if (ratecheck(&lastprint, &printinterval))
3029			printf("Approaching the limit on PV entries, consider "
3030			    "increasing either the vm.pmap.shpgperproc or the "
3031			    "vm.pmap.pv_entry_max tunable.\n");
3032retry:
3033	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3034	if (pc != NULL) {
3035		for (field = 0; field < _NPCM; field++) {
3036			if (pc->pc_map[field]) {
3037				bit = ffs(pc->pc_map[field]) - 1;
3038				break;
3039			}
3040		}
3041		if (field < _NPCM) {
3042			pv = &pc->pc_pventry[field * 32 + bit];
3043			pc->pc_map[field] &= ~(1ul << bit);
3044			/* If this was the last item, move it to tail */
3045			for (field = 0; field < _NPCM; field++)
3046				if (pc->pc_map[field] != 0) {
3047					PV_STAT(pv_entry_spare--);
3048					return (pv);	/* not full, return */
3049				}
3050			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3051			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3052			PV_STAT(pv_entry_spare--);
3053			return (pv);
3054		}
3055	}
3056	/*
3057	 * Access to the pte2list "pv_vafree" is synchronized by the pvh
3058	 * global lock.  If "pv_vafree" is currently non-empty, it will
3059	 * remain non-empty until pmap_pte2list_alloc() completes.
3060	 */
3061	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
3062	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
3063		if (try) {
3064			pv_entry_count--;
3065			PV_STAT(pc_chunk_tryfail++);
3066			return (NULL);
3067		}
3068		m = pmap_pv_reclaim(pmap);
3069		if (m == NULL)
3070			goto retry;
3071	}
3072	PV_STAT(pc_chunk_count++);
3073	PV_STAT(pc_chunk_allocs++);
3074	pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree);
3075	pmap_qenter((vm_offset_t)pc, &m, 1);
3076	pc->pc_pmap = pmap;
3077	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
3078	for (field = 1; field < _NPCM; field++)
3079		pc->pc_map[field] = pc_freemask[field];
3080	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
3081	pv = &pc->pc_pventry[0];
3082	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3083	PV_STAT(pv_entry_spare += _NPCPV - 1);
3084	return (pv);
3085}
3086
3087/*
3088 *  Create a pv entry for page at pa for
3089 *  (pmap, va).
3090 */
3091static void
3092pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
3093{
3094	pv_entry_t pv;
3095
3096	rw_assert(&pvh_global_lock, RA_WLOCKED);
3097	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3098	pv = get_pv_entry(pmap, FALSE);
3099	pv->pv_va = va;
3100	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3101}
3102
3103static __inline pv_entry_t
3104pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3105{
3106	pv_entry_t pv;
3107
3108	rw_assert(&pvh_global_lock, RA_WLOCKED);
3109	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3110		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3111			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3112			break;
3113		}
3114	}
3115	return (pv);
3116}
3117
3118static void
3119pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3120{
3121	pv_entry_t pv;
3122
3123	pv = pmap_pvh_remove(pvh, pmap, va);
3124	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3125	free_pv_entry(pmap, pv);
3126}
3127
3128static void
3129pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
3130{
3131	struct md_page *pvh;
3132
3133	rw_assert(&pvh_global_lock, RA_WLOCKED);
3134	pmap_pvh_free(&m->md, pmap, va);
3135	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
3136		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3137		if (TAILQ_EMPTY(&pvh->pv_list))
3138			vm_page_aflag_clear(m, PGA_WRITEABLE);
3139	}
3140}
3141
3142static void
3143pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3144{
3145	struct md_page *pvh;
3146	pv_entry_t pv;
3147	vm_offset_t va_last;
3148	vm_page_t m;
3149
3150	rw_assert(&pvh_global_lock, RA_WLOCKED);
3151	KASSERT((pa & PTE1_OFFSET) == 0,
3152	    ("pmap_pv_demote_pte1: pa is not 1mpage aligned"));
3153
3154	/*
3155	 * Transfer the 1mpage's pv entry for this mapping to the first
3156	 * page's pv list.
3157	 */
3158	pvh = pa_to_pvh(pa);
3159	va = pte1_trunc(va);
3160	pv = pmap_pvh_remove(pvh, pmap, va);
3161	KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found"));
3162	m = PHYS_TO_VM_PAGE(pa);
3163	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3164	/* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. */
3165	va_last = va + PTE1_SIZE - PAGE_SIZE;
3166	do {
3167		m++;
3168		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3169		    ("pmap_pv_demote_pte1: page %p is not managed", m));
3170		va += PAGE_SIZE;
3171		pmap_insert_entry(pmap, va, m);
3172	} while (va < va_last);
3173}
3174
3175#if VM_NRESERVLEVEL > 0
3176static void
3177pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3178{
3179	struct md_page *pvh;
3180	pv_entry_t pv;
3181	vm_offset_t va_last;
3182	vm_page_t m;
3183
3184	rw_assert(&pvh_global_lock, RA_WLOCKED);
3185	KASSERT((pa & PTE1_OFFSET) == 0,
3186	    ("pmap_pv_promote_pte1: pa is not 1mpage aligned"));
3187
3188	/*
3189	 * Transfer the first page's pv entry for this mapping to the
3190	 * 1mpage's pv list.  Aside from avoiding the cost of a call
3191	 * to get_pv_entry(), a transfer avoids the possibility that
3192	 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim()
3193	 * removes one of the mappings that is being promoted.
3194	 */
3195	m = PHYS_TO_VM_PAGE(pa);
3196	va = pte1_trunc(va);
3197	pv = pmap_pvh_remove(&m->md, pmap, va);
3198	KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found"));
3199	pvh = pa_to_pvh(pa);
3200	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3201	/* Free the remaining NPTE2_IN_PT2 - 1 pv entries. */
3202	va_last = va + PTE1_SIZE - PAGE_SIZE;
3203	do {
3204		m++;
3205		va += PAGE_SIZE;
3206		pmap_pvh_free(&m->md, pmap, va);
3207	} while (va < va_last);
3208}
3209#endif
3210
3211/*
3212 *  Conditionally create a pv entry.
3213 */
3214static boolean_t
3215pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
3216{
3217	pv_entry_t pv;
3218
3219	rw_assert(&pvh_global_lock, RA_WLOCKED);
3220	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3221	if (pv_entry_count < pv_entry_high_water &&
3222	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
3223		pv->pv_va = va;
3224		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3225		return (TRUE);
3226	} else
3227		return (FALSE);
3228}
3229
3230/*
3231 *  Create the pv entries for each of the pages within a section.
3232 */
3233static boolean_t
3234pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3235{
3236	struct md_page *pvh;
3237	pv_entry_t pv;
3238
3239	rw_assert(&pvh_global_lock, RA_WLOCKED);
3240	if (pv_entry_count < pv_entry_high_water &&
3241	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
3242		pv->pv_va = va;
3243		pvh = pa_to_pvh(pa);
3244		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3245		return (TRUE);
3246	} else
3247		return (FALSE);
3248}
3249
3250static inline void
3251pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1)
3252{
3253
3254	/* Kill all the small mappings or the big one only. */
3255	if (pte1_is_section(npte1))
3256		pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE);
3257	else
3258		pmap_tlb_flush(pmap, pte1_trunc(va));
3259}
3260
3261/*
3262 *  Update kernel pte1 on all pmaps.
3263 *
3264 *  The following function is called only on one cpu with disabled interrupts.
3265 *  In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way
3266 *  nobody can invoke explicit hardware table walk during the update of pte1.
3267 *  Unsolicited hardware table walk can still happen, invoked by speculative
3268 *  data or instruction prefetch or even by speculative hardware table walk.
3269 *
3270 *  The break-before-make approach should be implemented here.  However, it is
3271 *  not so easy to do that for kernel mappings, since the kernel would then,
3272 *  voluntarily but unexpectedly, be unmapping itself.
3273 */
3274static void
3275pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1)
3276{
3277	pmap_t pmap;
3278	pt1_entry_t *pte1p;
3279
3280	/*
3281	 * Get current pmap. Interrupts should be disabled here
3282	 * so PCPU_GET() is done atomically.
3283	 */
3284	pmap = PCPU_GET(curpmap);
3285	if (pmap == NULL)
3286		pmap = kernel_pmap;
3287
3288	/*
3289	 * (1) Change pte1 on current pmap.
3290	 * (2) Flush all obsolete TLB entries on current CPU.
3291	 * (3) Change pte1 on all pmaps.
3292	 * (4) Flush all obsolete TLB entries on all CPUs in SMP case.
3293	 */
3294
3295	pte1p = pmap_pte1(pmap, va);
3296	pte1_store(pte1p, npte1);
3297
3298	/* Kill all the small mappings or the big one only. */
3299	if (pte1_is_section(npte1)) {
3300		pmap_pte1_kern_promotions++;
3301		tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE);
3302	} else {
3303		pmap_pte1_kern_demotions++;
3304		tlb_flush_local(pte1_trunc(va));
3305	}
3306
3307	/*
3308	 * In the SMP case, this function is called while all CPUs are at the
3309	 * smp rendezvous, so there is no need to take 'allpmaps_lock' here.
3310	 * In the UP case, the function is called with this lock held.
3311	 */
3312	LIST_FOREACH(pmap, &allpmaps, pm_list) {
3313		pte1p = pmap_pte1(pmap, va);
3314		pte1_store(pte1p, npte1);
3315	}
3316
3317#ifdef SMP
3318	/* Kill all the small mappings or the big one only. */
3319	if (pte1_is_section(npte1))
3320		tlb_flush_range(pte1_trunc(va), PTE1_SIZE);
3321	else
3322		tlb_flush(pte1_trunc(va));
3323#endif
3324}
3325
3326#ifdef SMP
3327struct pte1_action {
3328	vm_offset_t va;
3329	pt1_entry_t npte1;
3330	u_int update;		/* CPU that updates the PTE1 */
3331};
3332
3333static void
3334pmap_update_pte1_action(void *arg)
3335{
3336	struct pte1_action *act = arg;
3337
3338	if (act->update == PCPU_GET(cpuid))
3339		pmap_update_pte1_kernel(act->va, act->npte1);
3340}
3341
3342/*
3343 *  Change pte1 on current pmap.
3344 *  Note that kernel pte1 must be changed on all pmaps.
3345 *
3346 *  According to the architecture reference manual published by ARM,
3347 *  the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA.
3348 *  According to this manual, UNPREDICTABLE behaviours must never happen in
3349 *  a viable system. In contrast, on x86 processors, it is not specified which
3350 *  TLB entry mapping the virtual address will be used, but the MMU doesn't
3351 *  generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone
3352 *  Black).
3353 *
3354 *  It's a problem when either promotion or demotion is being done. The pte1
3355 *  update and appropriate TLB flush must be done atomically in general.
3356 */
3357static void
3358pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
3359    pt1_entry_t npte1)
3360{
3361
3362	if (pmap == kernel_pmap) {
3363		struct pte1_action act;
3364
3365		sched_pin();
3366		act.va = va;
3367		act.npte1 = npte1;
3368		act.update = PCPU_GET(cpuid);
3369		smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
3370		    pmap_update_pte1_action, NULL, &act);
3371		sched_unpin();
3372	} else {
3373		register_t cspr;
3374
3375		/*
3376		 * Use break-before-make approach for changing userland
3377		 * mappings. It can cause L1 translation aborts on other
3378		 * cores in SMP case. So, special treatment is implemented
3379		 * in pmap_fault(). To reduce the likelihood that another core
3380		 * will be affected by the broken mapping, disable interrupts
3381		 * until the mapping change is completed.
3382		 */
3383		cspr = disable_interrupts(PSR_I | PSR_F);
3384		pte1_clear(pte1p);
3385		pmap_tlb_flush_pte1(pmap, va, npte1);
3386		pte1_store(pte1p, npte1);
3387		restore_interrupts(cspr);
3388	}
3389}
3390#else
3391static void
3392pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
3393    pt1_entry_t npte1)
3394{
3395
3396	if (pmap == kernel_pmap) {
3397		mtx_lock_spin(&allpmaps_lock);
3398		pmap_update_pte1_kernel(va, npte1);
3399		mtx_unlock_spin(&allpmaps_lock);
3400	} else {
3401		register_t cspr;
3402
3403		/*
3404		 * Use break-before-make approach for changing userland
3405		 * mappings. It's absolutely safe in UP case when interrupts
3406		 * are disabled.
3407		 */
3408		cspr = disable_interrupts(PSR_I | PSR_F);
3409		pte1_clear(pte1p);
3410		pmap_tlb_flush_pte1(pmap, va, npte1);
3411		pte1_store(pte1p, npte1);
3412		restore_interrupts(cspr);
3413	}
3414}
3415#endif
3416
3417#if VM_NRESERVLEVEL > 0
3418/*
3419 *  Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are
3420 *  within a single page table page (PT2) to a single 1MB page mapping.
3421 *  For promotion to occur, two conditions must be met: (1) the 4KB page
3422 *  mappings must map aligned, contiguous physical memory and (2) the 4KB page
3423 *  mappings must have identical characteristics.
3424 *
3425 *  Managed (PG_MANAGED) mappings within the kernel address space are not
3426 *  promoted.  The reason is that kernel PTE1s are replicated in each pmap but
3427 *  pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only
3428 *  read the PTE1 from the kernel pmap.
3429 */
3430static void
3431pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3432{
3433	pt1_entry_t npte1;
3434	pt2_entry_t *fpte2p, fpte2, fpte2_fav;
3435	pt2_entry_t *pte2p, pte2;
3436	vm_offset_t pteva __unused;
3437	vm_page_t m __unused;
3438
3439	PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__,
3440	    pmap, va, pte1_load(pte1p), pte1p));
3441
3442	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3443
3444	/*
3445	 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is
3446	 * either invalid, unused, or does not map the first 4KB physical page
3447	 * within a 1MB page.
3448	 */
3449	fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va));
3450	fpte2 = pte2_load(fpte2p);
3451	if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) !=
3452	    (PTE2_A | PTE2_V)) {
3453		pmap_pte1_p_failures++;
3454		CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p",
3455		    __func__, va, pmap);
3456		return;
3457	}
3458	if (pte2_is_managed(fpte2) && pmap == kernel_pmap) {
3459		pmap_pte1_p_failures++;
3460		CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p",
3461		    __func__, va, pmap);
3462		return;
3463	}
3464	if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) {
3465		/*
3466		 * When page is not modified, PTE2_RO can be set without
3467		 * a TLB invalidation.
3468		 */
3469		fpte2 |= PTE2_RO;
3470		pte2_store(fpte2p, fpte2);
3471	}
3472
3473	/*
3474	 * Examine each of the other PTE2s in the specified PT2. Abort if this
3475	 * PTE2 maps an unexpected 4KB physical page or does not have identical
3476	 * characteristics to the first PTE2.
3477	 */
3478	fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V));
3479	fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */
3480	for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) {
3481		pte2 = pte2_load(pte2p);
3482		if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) {
3483			pmap_pte1_p_failures++;
3484			CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p",
3485			    __func__, va, pmap);
3486			return;
3487		}
3488		if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) {
3489			/*
3490			 * When page is not modified, PTE2_RO can be set
3491			 * without a TLB invalidation. See note above.
3492			 */
3493			pte2 |= PTE2_RO;
3494			pte2_store(pte2p, pte2);
3495			pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET &
3496			    PTE2_FRAME);
3497			CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p",
3498			    __func__, pteva, pmap);
3499		}
3500		if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) {
3501			pmap_pte1_p_failures++;
3502			CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p",
3503			    __func__, va, pmap);
3504			return;
3505		}
3506
3507		fpte2_fav -= PTE2_SIZE;
3508	}
3509	/*
3510	 * The page table page in its current state will stay in PT2TAB
3511	 * until the PTE1 mapping the section is demoted by pmap_demote_pte1()
3512	 * or destroyed by pmap_remove_pte1().
3513	 *
3514	 * Note that L2 page table size is not equal to PAGE_SIZE.
3515	 */
3516	m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p))));
3517	KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size],
3518	    ("%s: PT2 page is out of range", __func__));
3519	KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK),
3520	    ("%s: PT2 page's pindex is wrong", __func__));
3521
3522	/*
3523	 * Get pte1 from pte2 format.
3524	 */
3525	npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V;
3526
3527	/*
3528	 * Promote the pv entries.
3529	 */
3530	if (pte2_is_managed(fpte2))
3531		pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1));
3532
3533	/*
3534	 * Promote the mappings.
3535	 */
3536	pmap_change_pte1(pmap, pte1p, va, npte1);
3537
3538	pmap_pte1_promotions++;
3539	CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p",
3540	    __func__, va, pmap);
3541
3542	PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
3543	    __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
3544}
3545#endif /* VM_NRESERVLEVEL > 0 */
3546
3547/*
3548 *  Zero L2 page table page.
3549 */
3550static __inline void
3551pmap_clear_pt2(pt2_entry_t *fpte2p)
3552{
3553	pt2_entry_t *pte2p;
3554
3555	for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++)
3556		pte2_clear(pte2p);
3558}
3559
3560/*
3561 *  Removes a 1MB page mapping from the kernel pmap.
3562 */
3563static void
3564pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3565{
3566	vm_page_t m;
3567	uint32_t pte1_idx;
3568	pt2_entry_t *fpte2p;
3569	vm_paddr_t pt2_pa;
3570
3571	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3572	m = pmap_pt2_page(pmap, va);
3573	if (m == NULL)
3574		/*
3575		 * QQQ: Is this function called only on promoted pte1?
3576		 *      We certainly create section mappings directly
3577		 *      (without promotion) in the kernel!
3578		 */
3579		panic("%s: missing pt2 page", __func__);
3580
3581	pte1_idx = pte1_index(va);
3582
3583	/*
3584	 * Initialize the L2 page table.
3585	 */
3586	fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx);
3587	pmap_clear_pt2(fpte2p);
3588
3589	/*
3590	 * Remove the 1MB mapping by replacing it with a link to the L2 table.
3591	 */
3592	pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx);
3593	pmap_kenter_pte1(va, PTE1_LINK(pt2_pa));
3594
3595	/*
3596	 * QQQ: We do not need to invalidate PT2MAP mapping
3597	 * as we did not change it. I.e. the L2 page table page
3598	 * was and still is mapped the same way.
3599	 */
3600}
3601
3602/*
3603 *  Do the things to unmap a section in a process
3604 */
3605static void
3606pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
3607    struct spglist *free)
3608{
3609	pt1_entry_t opte1;
3610	struct md_page *pvh;
3611	vm_offset_t eva, va;
3612	vm_page_t m;
3613
3614	PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva,
3615	    pte1_load(pte1p), pte1p));
3616
3617	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3618	KASSERT((sva & PTE1_OFFSET) == 0,
3619	    ("%s: sva is not 1mpage aligned", __func__));
3620
3621	/*
3622	 * Clear and invalidate the mapping. It should occupy one and only one
3623	 * TLB entry, so pmap_tlb_flush() called with the aligned address
3624	 * should be sufficient.
3625	 */
3626	opte1 = pte1_load_clear(pte1p);
3627	pmap_tlb_flush(pmap, sva);
3628
3629	if (pte1_is_wired(opte1))
3630		pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE;
3631	pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
3632	if (pte1_is_managed(opte1)) {
3633		pvh = pa_to_pvh(pte1_pa(opte1));
3634		pmap_pvh_free(pvh, pmap, sva);
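		/*
		 * Transfer the section's dirty and referenced state to each
		 * of the constituent 4KB pages and clear PGA_WRITEABLE on
		 * pages that are no longer mapped anywhere.
		 */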
3635		eva = sva + PTE1_SIZE;
3636		for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
3637		    va < eva; va += PAGE_SIZE, m++) {
3638			if (pte1_is_dirty(opte1))
3639				vm_page_dirty(m);
3640			if (opte1 & PTE1_A)
3641				vm_page_aflag_set(m, PGA_REFERENCED);
3642			if (TAILQ_EMPTY(&m->md.pv_list) &&
3643			    TAILQ_EMPTY(&pvh->pv_list))
3644				vm_page_aflag_clear(m, PGA_WRITEABLE);
3645		}
3646	}
3647	if (pmap == kernel_pmap) {
3648		/*
3649		 * L2 page table(s) can't be removed from the kernel map as
3650		 * the kernel counts on them (stuff around pmap_growkernel()).
3651		 */
3652		 pmap_remove_kernel_pte1(pmap, pte1p, sva);
3653	} else {
3654		/*
3655		 * Get associated L2 page table page.
3656		 * It's possible that the page was never allocated.
3657		 */
3658		m = pmap_pt2_page(pmap, sva);
3659		if (m != NULL)
3660			pmap_unwire_pt2_all(pmap, sva, m, free);
3661	}
3662}
3663
3664/*
3665 *  Fills L2 page table page with mappings to consecutive physical pages.
3666 */
3667static __inline void
3668pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2)
3669{
3670	pt2_entry_t *pte2p;
3671
3672	for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) {
3673		pte2_store(pte2p, npte2);
3674		npte2 += PTE2_SIZE;
3675	}
3676}
3677
3678/*
3679 *  Tries to demote a 1MB page mapping. If demotion fails, the
3680 *  1MB page mapping is invalidated.
3681 */
3682static boolean_t
3683pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3684{
3685	pt1_entry_t opte1, npte1;
3686	pt2_entry_t *fpte2p, npte2;
3687	vm_paddr_t pt2pg_pa, pt2_pa;
3688	vm_page_t m;
3689	struct spglist free;
3690	uint32_t pte1_idx, isnew = 0;
3691
3692	PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__,
3693	    pmap, va, pte1_load(pte1p), pte1p));
3694
3695	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3696
3697	opte1 = pte1_load(pte1p);
3698	KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__));
3699
3700	if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) {
3701		KASSERT(!pte1_is_wired(opte1),
3702		    ("%s: PT2 page for a wired mapping is missing", __func__));
3703
3704		/*
3705		 * Invalidate the 1MB page mapping and return
3706		 * "failure" if the mapping was never accessed or the
3707		 * allocation of the new page table page fails.
3708		 */
3709		if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL,
3710		    pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ |
3711		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
3712			SLIST_INIT(&free);
3713			pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
3714			pmap_free_zero_pages(&free);
3715			CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p",
3716			    __func__, va, pmap);
3717			return (FALSE);
3718		}
3719		if (va < VM_MAXUSER_ADDRESS)
3720			pmap->pm_stats.resident_count++;
3721
3722		isnew = 1;
3723
3724		/*
3725		 * We initialize all L2 page tables in the page even though
3726		 * we are going to change everything for one of them in
3727		 * a moment.
3728		 */
3729		pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
3730	} else {
3731		if (va < VM_MAXUSER_ADDRESS) {
3732			if (pt2_is_empty(m, va))
3733				isnew = 1; /* Demoting section w/o promotion. */
3734#ifdef INVARIANTS
3735			else
3736				KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire"
3737				    " count %u", __func__,
3738				    pt2_wirecount_get(m, pte1_index(va))));
3739#endif
3740		}
3741	}
3742
3743	pt2pg_pa = VM_PAGE_TO_PHYS(m);
3744	pte1_idx = pte1_index(va);
3745	/*
3746	 * If the pmap is current, then the PT2MAP can provide access to
3747	 * the page table page (promoted L2 page tables are not unmapped).
3748	 * Otherwise, temporarily map the L2 page table page (m) into
3749	 * the kernel's address space at either PADDR1 or PADDR2.
3750	 *
3751	 * Note that L2 page table size is not equal to PAGE_SIZE.
3752	 */
3753	if (pmap_is_current(pmap))
3754		fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx);
3755	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
3756		if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) {
3757			pte2_store(PMAP1, PTE2_KPT(pt2pg_pa));
3758#ifdef SMP
3759			PMAP1cpu = PCPU_GET(cpuid);
3760#endif
3761			tlb_flush_local((vm_offset_t)PADDR1);
3762			PMAP1changed++;
3763		} else
3764#ifdef SMP
3765		if (PMAP1cpu != PCPU_GET(cpuid)) {
3766			PMAP1cpu = PCPU_GET(cpuid);
3767			tlb_flush_local((vm_offset_t)PADDR1);
3768			PMAP1changedcpu++;
3769		} else
3770#endif
3771			PMAP1unchanged++;
3772		fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx);
3773	} else {
3774		mtx_lock(&PMAP2mutex);
3775		if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
3776			pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
3777			tlb_flush((vm_offset_t)PADDR2);
3778		}
3779		fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx);
3780	}
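	/*
	 * The new L1 entry is a link to the L2 page table which covers
	 * this 1MB virtual region; it will replace the section mapping.
	 */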
3781	pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx);
3782	npte1 = PTE1_LINK(pt2_pa);
3783
3784	KASSERT((opte1 & PTE1_A) != 0,
3785	    ("%s: opte1 is missing PTE1_A", __func__));
3786	KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM,
3787	    ("%s: opte1 has PTE1_NM", __func__));
3788
3789	/*
3790	 *  Get pte2 from pte1 format.
3791	 */
3792	npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V;
3793
3794	/*
3795	 * If the L2 page table page is new, initialize it. If the mapping
3796	 * has changed attributes, update the page table entries.
3797	 */
3798	if (isnew != 0) {
3799		pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2);
3800		pmap_fill_pt2(fpte2p, npte2);
3801	} else if ((pte2_load(fpte2p) & PTE2_PROMOTE) !=
3802		    (npte2 & PTE2_PROMOTE))
3803		pmap_fill_pt2(fpte2p, npte2);
3804
3805	KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2),
3806	    ("%s: fpte2p and npte2 map different physical addresses",
3807	    __func__));
3808
3809	if (fpte2p == PADDR2)
3810		mtx_unlock(&PMAP2mutex);
3811
3812	/*
3813	 * Demote the mapping. This pmap is locked. The old PTE1 has
3814	 * PTE1_A set. If the old PTE1 does not have PTE1_RO set, it also
3815	 * does not have PTE1_NM set. Thus, there is no danger of a race with
3816	 * another processor changing the setting of PTE1_A and/or PTE1_NM
3817	 * between the read above and the store below.
3818	 */
3819	pmap_change_pte1(pmap, pte1p, va, npte1);
3820
3821	/*
3822	 * Demote the pv entry. This depends on the earlier demotion
3823	 * of the mapping. Specifically, the (re)creation of a per-
3824	 * page pv entry might trigger the execution of pmap_pv_reclaim(),
3825	 * which might reclaim a newly (re)created per-page pv entry
3826	 * and destroy the associated mapping. In order to destroy
3827	 * the mapping, the PTE1 must have already changed from mapping
3828	 * the 1mpage to referencing the page table page.
3829	 */
3830	if (pte1_is_managed(opte1))
3831		pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1));
3832
3833	pmap_pte1_demotions++;
3834	CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p",
3835	    __func__, va, pmap);
3836
3837	PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
3838	    __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
3839	return (TRUE);
3840}
3841
3842/*
3843 *	Insert the given physical page (p) at
3844 *	the specified virtual address (v) in the
3845 *	target physical map with the protection requested.
3846 *
3847 *	If specified, the page will be wired down, meaning
3848 *	that the related pte can not be reclaimed.
3849 *
3850 *	NB:  This is the only routine which MAY NOT lazy-evaluate
3851 *	or lose information.  That is, this routine must actually
3852 *	insert this page into the given map NOW.
3853 */
3854int
3855pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3856    u_int flags, int8_t psind)
3857{
3858	pt1_entry_t *pte1p;
3859	pt2_entry_t *pte2p;
3860	pt2_entry_t npte2, opte2;
3861	pv_entry_t pv;
3862	vm_paddr_t opa, pa;
3863	vm_page_t mpte2, om;
3864	boolean_t wired;
3865
3866	va = trunc_page(va);
3867	mpte2 = NULL;
3868	wired = (flags & PMAP_ENTER_WIRED) != 0;
3869
3870	KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__));
3871	KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
3872	    ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__,
3873	    va));
3874	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
3875		VM_OBJECT_ASSERT_LOCKED(m->object);
3876
3877	rw_wlock(&pvh_global_lock);
3878	PMAP_LOCK(pmap);
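	/*
	 * Pin to the current CPU: pmap_pte2_quick() may use a CPU-local
	 * mapping window whose TLB entry is only flushed on the local CPU,
	 * so the thread must not migrate while the window is in use.
	 */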
3879	sched_pin();
3880
3881	/*
3882	 * In the case that a page table page is not
3883	 * resident, we are creating it here.
3884	 */
3885	if (va < VM_MAXUSER_ADDRESS) {
3886		mpte2 = pmap_allocpte2(pmap, va, flags);
3887		if (mpte2 == NULL) {
3888			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3889			    ("pmap_allocpte2 failed with sleep allowed"));
3890			sched_unpin();
3891			rw_wunlock(&pvh_global_lock);
3892			PMAP_UNLOCK(pmap);
3893			return (KERN_RESOURCE_SHORTAGE);
3894		}
3895	}
3896	pte1p = pmap_pte1(pmap, va);
3897	if (pte1_is_section(pte1_load(pte1p)))
3898		panic("%s: attempted on 1MB page", __func__);
3899	pte2p = pmap_pte2_quick(pmap, va);
3900	if (pte2p == NULL)
3901		panic("%s: invalid L1 page table entry va=%#x", __func__, va);
3902
3903	om = NULL;
3904	pa = VM_PAGE_TO_PHYS(m);
3905	opte2 = pte2_load(pte2p);
3906	opa = pte2_pa(opte2);
3907	/*
3908	 * Mapping has not changed, must be protection or wiring change.
3909	 */
3910	if (pte2_is_valid(opte2) && (opa == pa)) {
3911		/*
3912		 * Wiring change, just update stats. We don't worry about
3913		 * wiring PT2 pages as they remain resident as long as there
3914		 * are valid mappings in them. Hence, if a user page is wired,
3915		 * the PT2 page will be also.
3916		 */
3917		if (wired && !pte2_is_wired(opte2))
3918			pmap->pm_stats.wired_count++;
3919		else if (!wired && pte2_is_wired(opte2))
3920			pmap->pm_stats.wired_count--;
3921
3922		/*
3923		 * Remove extra pte2 reference
3924		 */
3925		if (mpte2)
3926			pt2_wirecount_dec(mpte2, pte1_index(va));
3927		if (pte2_is_managed(opte2))
3928			om = m;
3929		goto validate;
3930	}
3931
3932	/*
3933	 * QQQ: We think that changing the physical address of a writeable
3934	 *      mapping is not safe. Well, maybe on kernel address space with
3935	 *      correct locking, it could make sense. However, we have no idea
3936	 *      why anyone would do that on user address space. Are we wrong?
3937	 */
3938	KASSERT((opa == 0) || (opa == pa) ||
3939	    !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0),
3940	    ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!",
3941	    __func__, pmap, va, opte2, opa, pa, flags, prot));
3942
3943	pv = NULL;
3944
3945	/*
3946	 * Mapping has changed, invalidate old range and fall through to
3947	 * handle validating new mapping.
3948	 */
3949	if (opa) {
3950		if (pte2_is_wired(opte2))
3951			pmap->pm_stats.wired_count--;
3952		if (pte2_is_managed(opte2)) {
3953			om = PHYS_TO_VM_PAGE(opa);
3954			pv = pmap_pvh_remove(&om->md, pmap, va);
3955		}
3956		/*
3957		 * Remove extra pte2 reference
3958		 */
3959		if (mpte2 != NULL)
3960			pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT);
3961	} else
3962		pmap->pm_stats.resident_count++;
3963
3964	/*
3965	 * Enter on the PV list if part of our managed memory.
3966	 */
3967	if ((m->oflags & VPO_UNMANAGED) == 0) {
3968		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3969		    ("%s: managed mapping within the clean submap", __func__));
3970		if (pv == NULL)
3971			pv = get_pv_entry(pmap, FALSE);
3972		pv->pv_va = va;
3973		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3974	} else if (pv != NULL)
3975		free_pv_entry(pmap, pv);
3976
3977	/*
3978	 * Increment counters
3979	 */
3980	if (wired)
3981		pmap->pm_stats.wired_count++;
3982
3983validate:
3984	/*
3985	 * Now validate mapping with desired protection/wiring.
3986	 */
3987	npte2 = PTE2(pa, PTE2_NM, vm_page_pte2_attr(m));
3988	if (prot & VM_PROT_WRITE) {
3989		if (pte2_is_managed(npte2))
3990			vm_page_aflag_set(m, PGA_WRITEABLE);
3991	}
3992	else
3993		npte2 |= PTE2_RO;
3994	if ((prot & VM_PROT_EXECUTE) == 0)
3995		npte2 |= PTE2_NX;
3996	if (wired)
3997		npte2 |= PTE2_W;
3998	if (va < VM_MAXUSER_ADDRESS)
3999		npte2 |= PTE2_U;
4000	if (pmap != kernel_pmap)
4001		npte2 |= PTE2_NG;
4002
4003	/*
4004	 * If the mapping or permission bits are different, we need
4005	 * to update the pte2.
4006	 *
4007	 * QQQ: Think again and again what to do
4008	 *      if the mapping is going to be changed!
4009	 */
4010	if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) {
4011		/*
4012		 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4013		 * is set. Do it now, before the mapping is stored and made
4014		 * valid for the hardware table walk. If done later, there is
4015		 * a race with other threads of the current process in the lazy
4016		 * loading case. Don't do it for kernel memory, which is mapped
4017		 * with exec permission even if the memory isn't going to hold
4018		 * executable code. The only time an icache sync is needed for
4019		 * kernel memory is after a kernel module is loaded and its
4020		 * relocation info is processed, and that is done in
4021		 * elf_cpu_load_file().
4022		 *
4023		 * QQQ: (1) Is there a better way or place to sync the icache?
4024		 *      (2) Now, we do it on a page basis.
4025		 */
4026		if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4027		    m->md.pat_mode == VM_MEMATTR_WB_WA &&
4028		    (opa != pa || (opte2 & PTE2_NX)))
4029			cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4030
4031		npte2 |= PTE2_A;
4032		if (flags & VM_PROT_WRITE)
4033			npte2 &= ~PTE2_NM;
4034		if (opte2 & PTE2_V) {
4035			/* Change mapping with break-before-make approach. */
4036			opte2 = pte2_load_clear(pte2p);
4037			pmap_tlb_flush(pmap, va);
4038			pte2_store(pte2p, npte2);
4039			if (opte2 & PTE2_A) {
4040				if (pte2_is_managed(opte2))
4041					vm_page_aflag_set(om, PGA_REFERENCED);
4042			}
4043			if (pte2_is_dirty(opte2)) {
4044				if (pte2_is_managed(opte2))
4045					vm_page_dirty(om);
4046			}
4047			if (pte2_is_managed(opte2) &&
4048			    TAILQ_EMPTY(&om->md.pv_list) &&
4049			    ((om->flags & PG_FICTITIOUS) != 0 ||
4050			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4051				vm_page_aflag_clear(om, PGA_WRITEABLE);
4052		} else
4053			pte2_store(pte2p, npte2);
4054	}
4055#if 0
4056	else {
4057		/*
4058		 * QQQ: As long as both the access and not-modified bits are
4059		 *      emulated by software, this should not happen. Some
4060		 *      analysis is needed if it really does happen. A missing
4061		 *      TLB flush somewhere could be the reason.
4062		 */
4063		panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap,
4064		    va, opte2, npte2);
4065	}
4066#endif
4067
4068#if VM_NRESERVLEVEL > 0
4069	/*
4070	 * If both the L2 page table page and the reservation are fully
4071	 * populated, then attempt promotion.
4072	 */
4073	if ((mpte2 == NULL || pt2_is_full(mpte2, va)) &&
4074	    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
4075	    vm_reserv_level_iffullpop(m) == 0)
4076		pmap_promote_pte1(pmap, pte1p, va);
4077#endif
4078	sched_unpin();
4079	rw_wunlock(&pvh_global_lock);
4080	PMAP_UNLOCK(pmap);
4081	return (KERN_SUCCESS);
4082}
4083
4084/*
4085 *  Do the things to unmap a page in a process.
4086 */
4087static int
4088pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va,
4089    struct spglist *free)
4090{
4091	pt2_entry_t opte2;
4092	vm_page_t m;
4093
4094	rw_assert(&pvh_global_lock, RA_WLOCKED);
4095	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4096
4097	/* Clear and invalidate the mapping. */
4098	opte2 = pte2_load_clear(pte2p);
4099	pmap_tlb_flush(pmap, va);
4100
4101	KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x",
4102	    __func__, pmap, va, opte2));
4103
4104	if (opte2 & PTE2_W)
4105		pmap->pm_stats.wired_count -= 1;
4106	pmap->pm_stats.resident_count -= 1;
4107	if (pte2_is_managed(opte2)) {
4108		m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
4109		if (pte2_is_dirty(opte2))
4110			vm_page_dirty(m);
4111		if (opte2 & PTE2_A)
4112			vm_page_aflag_set(m, PGA_REFERENCED);
4113		pmap_remove_entry(pmap, m, va);
4114	}
4115	return (pmap_unuse_pt2(pmap, va, free));
4116}
4117
4118/*
4119 *  Remove a single page from a process address space.
4120 */
4121static void
4122pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
4123{
4124	pt2_entry_t *pte2p;
4125
4126	rw_assert(&pvh_global_lock, RA_WLOCKED);
4127	KASSERT(curthread->td_pinned > 0,
4128	    ("%s: curthread not pinned", __func__));
4129	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4130	if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL ||
4131	    !pte2_is_valid(pte2_load(pte2p)))
4132		return;
4133	pmap_remove_pte2(pmap, pte2p, va, free);
4134}
4135
4136/*
4137 *  Remove the given range of addresses from the specified map.
4138 *
4139 *  It is assumed that the start and end are properly
4140 *  rounded to the page size.
4141 */
4142void
4143pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4144{
4145	vm_offset_t nextva;
4146	pt1_entry_t *pte1p, pte1;
4147	pt2_entry_t *pte2p, pte2;
4148	struct spglist free;
4149
4150	/*
4151	 * Perform an unsynchronized read. This is, however, safe.
4152	 */
4153	if (pmap->pm_stats.resident_count == 0)
4154		return;
4155
4156	SLIST_INIT(&free);
4157
4158	rw_wlock(&pvh_global_lock);
4159	sched_pin();
4160	PMAP_LOCK(pmap);
4161
4162	/*
4163	 * Special handling of removing one page. A very common
4164	 * operation and easy to short circuit some code.
4165	 */
4166	if (sva + PAGE_SIZE == eva) {
4167		pte1 = pte1_load(pmap_pte1(pmap, sva));
4168		if (pte1_is_link(pte1)) {
4169			pmap_remove_page(pmap, sva, &free);
4170			goto out;
4171		}
4172	}
4173
4174	for (; sva < eva; sva = nextva) {
4175		/*
4176		 * Calculate address for next L2 page table.
4177		 */
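		/*
		 * The overflow check below handles the last 1MB region of
		 * the address space, where sva + PTE1_SIZE wraps around
		 * to zero.
		 */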
4178		nextva = pte1_trunc(sva + PTE1_SIZE);
4179		if (nextva < sva)
4180			nextva = eva;
4181		if (pmap->pm_stats.resident_count == 0)
4182			break;
4183
4184		pte1p = pmap_pte1(pmap, sva);
4185		pte1 = pte1_load(pte1p);
4186
4187		/*
4188		 * Weed out invalid mappings. Note: we assume that the L1 page
4189		 * table is always allocated, and in kernel virtual.
4190		 */
4191		if (pte1 == 0)
4192			continue;
4193
4194		if (pte1_is_section(pte1)) {
4195			/*
4196			 * Are we removing the entire large page?  If not,
4197			 * demote the mapping and fall through.
4198			 */
4199			if (sva + PTE1_SIZE == nextva && eva >= nextva) {
4200				pmap_remove_pte1(pmap, pte1p, sva, &free);
4201				continue;
4202			} else if (!pmap_demote_pte1(pmap, pte1p, sva)) {
4203				/* The large page mapping was destroyed. */
4204				continue;
4205			}
4206#ifdef INVARIANTS
4207			else {
4208				/* Update pte1 after demotion. */
4209				pte1 = pte1_load(pte1p);
4210			}
4211#endif
4212		}
4213
4214		KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
4215		    " is not link", __func__, pmap, sva, pte1, pte1p));
4216
4217		/*
4218		 * Limit our scan to either the end of the va represented
4219		 * by the current L2 page table page, or to the end of the
4220		 * range being removed.
4221		 */
4222		if (nextva > eva)
4223			nextva = eva;
4224
4225		for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva;
4226		    pte2p++, sva += PAGE_SIZE) {
4227			pte2 = pte2_load(pte2p);
4228			if (!pte2_is_valid(pte2))
4229				continue;
4230			if (pmap_remove_pte2(pmap, pte2p, sva, &free))
4231				break;
4232		}
4233	}
4234out:
4235	sched_unpin();
4236	rw_wunlock(&pvh_global_lock);
4237	PMAP_UNLOCK(pmap);
4238	pmap_free_zero_pages(&free);
4239}
4240
4241/*
4242 *	Routine:	pmap_remove_all
4243 *	Function:
4244 *		Removes this physical page from
4245 *		all physical maps in which it resides.
4246 *		Reflects back modify bits to the pager.
4247 *
4248 *	Notes:
4249 *		Original versions of this routine were very
4250 *		inefficient because they iteratively called
4251 *		pmap_remove (slow...)
4252 */
4253
4254void
4255pmap_remove_all(vm_page_t m)
4256{
4257	struct md_page *pvh;
4258	pv_entry_t pv;
4259	pmap_t pmap;
4260	pt2_entry_t *pte2p, opte2;
4261	pt1_entry_t *pte1p;
4262	vm_offset_t va;
4263	struct spglist free;
4264
4265	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4266	    ("%s: page %p is not managed", __func__, m));
4267	SLIST_INIT(&free);
4268	rw_wlock(&pvh_global_lock);
4269	sched_pin();
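	/*
	 * Fictitious pages are never mapped by 1MB sections, so only their
	 * 4KB mappings need to be removed.
	 */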
4270	if ((m->flags & PG_FICTITIOUS) != 0)
4271		goto small_mappings;
4272	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4273	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4274		va = pv->pv_va;
4275		pmap = PV_PMAP(pv);
4276		PMAP_LOCK(pmap);
4277		pte1p = pmap_pte1(pmap, va);
4278		(void)pmap_demote_pte1(pmap, pte1p, va);
4279		PMAP_UNLOCK(pmap);
4280	}
4281small_mappings:
4282	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4283		pmap = PV_PMAP(pv);
4284		PMAP_LOCK(pmap);
4285		pmap->pm_stats.resident_count--;
4286		pte1p = pmap_pte1(pmap, pv->pv_va);
4287		KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found "
4288		    "a 1mpage in page %p's pv list", __func__, m));
4289		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
4290		opte2 = pte2_load_clear(pte2p);
4291		pmap_tlb_flush(pmap, pv->pv_va);
4292		KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2",
4293		    __func__, pmap, pv->pv_va));
4294		if (pte2_is_wired(opte2))
4295			pmap->pm_stats.wired_count--;
4296		if (opte2 & PTE2_A)
4297			vm_page_aflag_set(m, PGA_REFERENCED);
4298
4299		/*
4300		 * Update the vm_page_t clean and reference bits.
4301		 */
4302		if (pte2_is_dirty(opte2))
4303			vm_page_dirty(m);
4304		pmap_unuse_pt2(pmap, pv->pv_va, &free);
4305		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4306		free_pv_entry(pmap, pv);
4307		PMAP_UNLOCK(pmap);
4308	}
4309	vm_page_aflag_clear(m, PGA_WRITEABLE);
4310	sched_unpin();
4311	rw_wunlock(&pvh_global_lock);
4312	pmap_free_zero_pages(&free);
4313}
4314
4315/*
4316 *  Just a subroutine for pmap_remove_pages() to reasonably satisfy
4317 *  good coding style, a.k.a. 80 character line width limit hell.
4318 */
4319static __inline void
4320pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv,
4321    struct spglist *free)
4322{
4323	vm_paddr_t pa;
4324	vm_page_t m, mt, mpt2pg;
4325	struct md_page *pvh;
4326
4327	pa = pte1_pa(pte1);
4328	m = PHYS_TO_VM_PAGE(pa);
4329
4330	KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4331	    __func__, m, m->phys_addr, pa));
4332	KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4333	    m < &vm_page_array[vm_page_array_size],
4334	    ("%s: bad pte1 %#x", __func__, pte1));
4335
4336	if (pte1_is_dirty(pte1)) {
4337		for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4338			vm_page_dirty(mt);
4339	}
4340
4341	pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
4342	pvh = pa_to_pvh(pa);
4343	TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
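	/*
	 * If that was the last 1MB mapping of this section, clear
	 * PGA_WRITEABLE on every constituent 4KB page that has no
	 * remaining 4KB mappings of its own.
	 */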
4344	if (TAILQ_EMPTY(&pvh->pv_list)) {
4345		for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4346			if (TAILQ_EMPTY(&mt->md.pv_list))
4347				vm_page_aflag_clear(mt, PGA_WRITEABLE);
4348	}
4349	mpt2pg = pmap_pt2_page(pmap, pv->pv_va);
4350	if (mpt2pg != NULL)
4351		pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free);
4352}
4353
4354/*
4355 *  Just a subroutine for pmap_remove_pages() to reasonably satisfy
4356 *  good coding style, a.k.a. 80 character line width limit hell.
4357 */
4358static __inline void
4359pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv,
4360    struct spglist *free)
4361{
4362	vm_paddr_t pa;
4363	vm_page_t m;
4364	struct md_page *pvh;
4365
4366	pa = pte2_pa(pte2);
4367	m = PHYS_TO_VM_PAGE(pa);
4368
4369	KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4370	    __func__, m, m->phys_addr, pa));
4371	KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4372	    m < &vm_page_array[vm_page_array_size],
4373	    ("%s: bad pte2 %#x", __func__, pte2));
4374
4375	if (pte2_is_dirty(pte2))
4376		vm_page_dirty(m);
4377
4378	pmap->pm_stats.resident_count--;
4379	TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4380	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
4381		pvh = pa_to_pvh(pa);
4382		if (TAILQ_EMPTY(&pvh->pv_list))
4383			vm_page_aflag_clear(m, PGA_WRITEABLE);
4384	}
4385	pmap_unuse_pt2(pmap, pv->pv_va, free);
4386}
4387
4388/*
4389 *  Remove all pages from the specified address space; this aids process
4390 *  exit speeds. Also, this code is special cased for the current process
4391 *  only, but can have the more generic (and slightly slower) mode enabled.
4392 *  This is much faster than pmap_remove in the case of running down
4393 *  an entire address space.
4394 */
4395void
4396pmap_remove_pages(pmap_t pmap)
4397{
4398	pt1_entry_t *pte1p, pte1;
4399	pt2_entry_t *pte2p, pte2;
4400	pv_entry_t pv;
4401	struct pv_chunk *pc, *npc;
4402	struct spglist free;
4403	int field, idx;
4404	int32_t bit;
4405	uint32_t inuse, bitmask;
4406	boolean_t allfree;
4407
4408	/*
4409	 * Assert that the given pmap is only active on the current
4410	 * CPU.  Unfortunately, we cannot block another CPU from
4411	 * activating the pmap while this function is executing.
4412	 */
4413	KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace),
4414	    ("%s: non-current pmap %p", __func__, pmap));
4415#if defined(SMP) && defined(INVARIANTS)
4416	{
4417		cpuset_t other_cpus;
4418
4419		sched_pin();
4420		other_cpus = pmap->pm_active;
4421		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
4422		sched_unpin();
4423		KASSERT(CPU_EMPTY(&other_cpus),
4424		    ("%s: pmap %p active on other cpus", __func__, pmap));
4425	}
4426#endif
4427	SLIST_INIT(&free);
4428	rw_wlock(&pvh_global_lock);
4429	PMAP_LOCK(pmap);
4430	sched_pin();
4431	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4432		KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p",
4433		    __func__, pmap, pc->pc_pmap));
4434		allfree = TRUE;
4435		for (field = 0; field < _NPCM; field++) {
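			/*
			 * A clear bit in pc_map marks an allocated pv entry,
			 * so inverting the map and masking it with
			 * pc_freemask yields the entries in use within this
			 * field.
			 */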
4436			inuse = (~(pc->pc_map[field])) & pc_freemask[field];
4437			while (inuse != 0) {
4438				bit = ffs(inuse) - 1;
4439				bitmask = 1UL << bit;
4440				idx = field * 32 + bit;
4441				pv = &pc->pc_pventry[idx];
4442				inuse &= ~bitmask;
4443
4444				/*
4445				 * Note that we cannot remove wired pages
4446				 * from a process' mapping at this time
4447				 */
4448				pte1p = pmap_pte1(pmap, pv->pv_va);
4449				pte1 = pte1_load(pte1p);
4450				if (pte1_is_section(pte1)) {
4451					if (pte1_is_wired(pte1))  {
4452						allfree = FALSE;
4453						continue;
4454					}
4455					pte1_clear(pte1p);
4456					pmap_remove_pte1_quick(pmap, pte1, pv,
4457					    &free);
4458				}
4459				else if (pte1_is_link(pte1)) {
4460					pte2p = pt2map_entry(pv->pv_va);
4461					pte2 = pte2_load(pte2p);
4462
4463					if (!pte2_is_valid(pte2)) {
4464						printf("%s: pmap %p va %#x "
4465						    "pte2 %#x\n", __func__,
4466						    pmap, pv->pv_va, pte2);
4467						panic("bad pte2");
4468					}
4469
4470					if (pte2_is_wired(pte2))   {
4471						allfree = FALSE;
4472						continue;
4473					}
4474					pte2_clear(pte2p);
4475					pmap_remove_pte2_quick(pmap, pte2, pv,
4476					    &free);
4477				} else {
4478					printf("%s: pmap %p va %#x pte1 %#x\n",
4479					    __func__, pmap, pv->pv_va, pte1);
4480					panic("bad pte1");
4481				}
4482
4483				/* Mark free */
4484				PV_STAT(pv_entry_frees++);
4485				PV_STAT(pv_entry_spare++);
4486				pv_entry_count--;
4487				pc->pc_map[field] |= bitmask;
4488			}
4489		}
4490		if (allfree) {
4491			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4492			free_pv_chunk(pc);
4493		}
4494	}
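	/*
	 * The pmap is active only on this CPU (asserted above) and all its
	 * user mappings are non-global, so flushing all non-global TLB
	 * entries locally is enough to get rid of the removed mappings.
	 */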
4495	tlb_flush_all_ng_local();
4496	sched_unpin();
4497	rw_wunlock(&pvh_global_lock);
4498	PMAP_UNLOCK(pmap);
4499	pmap_free_zero_pages(&free);
4500}
4501
4502/*
4503 *  This code makes some *MAJOR* assumptions:
4504 *  1. Current pmap & pmap exists.
4505 *  2. Not wired.
4506 *  3. Read access.
4507 *  4. No L2 page table pages.
4508 *  but is *MUCH* faster than pmap_enter...
4509 */
4510static vm_page_t
4511pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4512    vm_prot_t prot, vm_page_t mpt2pg)
4513{
4514	pt2_entry_t *pte2p, pte2;
4515	vm_paddr_t pa;
4516	struct spglist free;
4517	uint32_t l2prot;
4518
4519	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
4520	    (m->oflags & VPO_UNMANAGED) != 0,
4521	    ("%s: managed mapping within the clean submap", __func__));
4522	rw_assert(&pvh_global_lock, RA_WLOCKED);
4523	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4524
4525	/*
4526	 * In the case that an L2 page table page is not
4527	 * resident, we are creating it here.
4528	 */
4529	if (va < VM_MAXUSER_ADDRESS) {
4530		u_int pte1_idx;
4531		pt1_entry_t pte1, *pte1p;
4532		vm_paddr_t pt2_pa;
4533
4534		/*
4535		 * Get L1 page table things.
4536		 */
4537		pte1_idx = pte1_index(va);
4538		pte1p = pmap_pte1(pmap, va);
4539		pte1 = pte1_load(pte1p);
4540
4541		if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) {
4542			/*
4543			 * Each of NPT2_IN_PG L2 page tables on the page can
4544			 * come here. Make sure that associated L1 page table
4545			 * link is established.
4546			 *
4547			 * QQQ: It turns out that we don't establish all links
4548			 *      to the L2 page tables of a newly allocated L2
4549			 *      page table page.
4550			 */
4551			KASSERT(!pte1_is_section(pte1),
4552			    ("%s: pte1 %#x is section", __func__, pte1));
4553			if (!pte1_is_link(pte1)) {
4554				pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg),
4555				    pte1_idx);
4556				pte1_store(pte1p, PTE1_LINK(pt2_pa));
4557			}
4558			pt2_wirecount_inc(mpt2pg, pte1_idx);
4559		} else {
4560			/*
4561			 * If the L2 page table page is mapped, we just
4562			 * increment the hold count, and activate it.
4563			 */
4564			if (pte1_is_section(pte1)) {
4565				return (NULL);
4566			} else if (pte1_is_link(pte1)) {
4567				mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
4568				pt2_wirecount_inc(mpt2pg, pte1_idx);
4569			} else {
4570				mpt2pg = _pmap_allocpte2(pmap, va,
4571				    PMAP_ENTER_NOSLEEP);
4572				if (mpt2pg == NULL)
4573					return (NULL);
4574			}
4575		}
4576	} else {
4577		mpt2pg = NULL;
4578	}
4579
4580	/*
4581	 * This call to pt2map_entry() makes the assumption that we are
4582	 * entering the page into the current pmap.  In order to support
4583	 * quick entry into any pmap, one would likely use pmap_pte2_quick().
4584	 * But that isn't as quick as pt2map_entry().
4585	 */
4586	pte2p = pt2map_entry(va);
4587	pte2 = pte2_load(pte2p);
4588	if (pte2_is_valid(pte2)) {
4589		if (mpt2pg != NULL) {
4590			/*
4591			 * Remove extra pte2 reference
4592			 */
4593			pt2_wirecount_dec(mpt2pg, pte1_index(va));
4594			mpt2pg = NULL;
4595		}
4596		return (NULL);
4597	}
4598
4599	/*
4600	 * Enter on the PV list if part of our managed memory.
4601	 */
4602	if ((m->oflags & VPO_UNMANAGED) == 0 &&
4603	    !pmap_try_insert_pv_entry(pmap, va, m)) {
4604		if (mpt2pg != NULL) {
4605			SLIST_INIT(&free);
4606			if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) {
4607				pmap_tlb_flush(pmap, va);
4608				pmap_free_zero_pages(&free);
4609			}
4610
4611			mpt2pg = NULL;
4612		}
4613		return (NULL);
4614	}
4615
4616	/*
4617	 * Increment counters
4618	 */
4619	pmap->pm_stats.resident_count++;
4620
4621	/*
4622	 * Now validate mapping with RO protection
4623	 */
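	/*
	 * The mapping is created read-only with PTE2_NM set.  If write
	 * access is needed later, it is established by a regular
	 * pmap_enter() on the resulting write fault.
	 */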
4624	pa = VM_PAGE_TO_PHYS(m);
4625	l2prot = PTE2_RO | PTE2_NM;
4626	if (va < VM_MAXUSER_ADDRESS)
4627		l2prot |= PTE2_U | PTE2_NG;
4628	if ((prot & VM_PROT_EXECUTE) == 0)
4629		l2prot |= PTE2_NX;
4630	else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) {
4631		/*
4632		 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4633		 * is set. QQQ: For more info, see comments in pmap_enter().
4634		 */
4635		cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4636	}
4637	pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m)));
4638
4639	return (mpt2pg);
4640}
4641
4642void
4643pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4644{
4645
4646	rw_wlock(&pvh_global_lock);
4647	PMAP_LOCK(pmap);
4648	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
4649	rw_wunlock(&pvh_global_lock);
4650	PMAP_UNLOCK(pmap);
4651}
4652
4653/*
4654 *  Tries to create a 1MB page mapping.  Returns TRUE if successful and
4655 *  FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
4656 *  blocking, (2) a mapping already exists at the specified virtual address, or
4657 *  (3) a pv entry cannot be allocated without reclaiming another pv entry.
4658 */
4659static boolean_t
4660pmap_enter_pte1(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4661{
4662	pt1_entry_t *pte1p;
4663	vm_paddr_t pa;
4664	uint32_t l1prot;
4665
4666	rw_assert(&pvh_global_lock, RA_WLOCKED);
4667	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4668	pte1p = pmap_pte1(pmap, va);
4669	if (pte1_is_valid(pte1_load(pte1p))) {
4670		CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", __func__,
4671		    va, pmap);
4672		return (FALSE);
4673	}
4674	if ((m->oflags & VPO_UNMANAGED) == 0) {
4675		/*
4676		 * Abort this mapping if its PV entry could not be created.
4677		 */
4678		if (!pmap_pv_insert_pte1(pmap, va, VM_PAGE_TO_PHYS(m))) {
4679			CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
4680			    __func__, va, pmap);
4681			return (FALSE);
4682		}
4683	}
4684	/*
4685	 * Increment counters.
4686	 */
4687	pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE;
4688
4689	/*
4690	 * Map the section.
4691	 *
4692	 * QQQ: Why is VM_PROT_WRITE not evaluated and the mapping
4693	 *      made read-only?
4694	 */
4695	pa = VM_PAGE_TO_PHYS(m);
4696	l1prot = PTE1_RO | PTE1_NM;
4697	if (va < VM_MAXUSER_ADDRESS)
4698		l1prot |= PTE1_U | PTE1_NG;
4699	if ((prot & VM_PROT_EXECUTE) == 0)
4700		l1prot |= PTE1_NX;
4701	else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) {
4702		/*
4703		 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4704		 * is set. QQQ: For more info, see comments in pmap_enter().
4705		 */
4706		cache_icache_sync_fresh(va, pa, PTE1_SIZE);
4707	}
4708	pte1_store(pte1p, PTE1(pa, l1prot, ATTR_TO_L1(vm_page_pte2_attr(m))));
4709
4710	pmap_pte1_mappings++;
4711	CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
4712	    pmap);
4713	return (TRUE);
4714}
4715
4716/*
4717 *  Maps a sequence of resident pages belonging to the same object.
4718 *  The sequence begins with the given page m_start.  This page is
4719 *  mapped at the given virtual address start.  Each subsequent page is
4720 *  mapped at a virtual address that is offset from start by the same
4721 *  amount as the page is offset from m_start within the object.  The
4722 *  last page in the sequence is the page with the largest offset from
4723 *  m_start that can be mapped at a virtual address less than the given
4724 *  virtual address end.  Not every virtual page between start and end
4725 *  is mapped; only those for which a resident page exists with the
4726 *  corresponding offset from m_start are mapped.
4727 */
4728void
4729pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4730    vm_page_t m_start, vm_prot_t prot)
4731{
4732	vm_offset_t va;
4733	vm_page_t m, mpt2pg;
4734	vm_pindex_t diff, psize;
4735
4736	PDEBUG(6, printf("%s: pmap %p start %#x end  %#x m %p prot %#x\n",
4737	    __func__, pmap, start, end, m_start, prot));
4738
4739	VM_OBJECT_ASSERT_LOCKED(m_start->object);
4740	psize = atop(end - start);
4741	mpt2pg = NULL;
4742	m = m_start;
4743	rw_wlock(&pvh_global_lock);
4744	PMAP_LOCK(pmap);
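	/*
	 * Try a 1MB section mapping when the virtual address is section
	 * aligned, the whole section fits below the end address, the page
	 * starts a fully populated superpage reservation (psind == 1) and
	 * superpages are enabled; otherwise fall back to a 4KB mapping.
	 */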
4745	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4746		va = start + ptoa(diff);
4747		if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end &&
4748		    m->psind == 1 && sp_enabled &&
4749		    pmap_enter_pte1(pmap, va, m, prot))
4750			m = &m[PTE1_SIZE / PAGE_SIZE - 1];
4751		else
4752			mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot,
4753			    mpt2pg);
4754		m = TAILQ_NEXT(m, listq);
4755	}
4756	rw_wunlock(&pvh_global_lock);
4757	PMAP_UNLOCK(pmap);
4758}
4759
4760/*
4761 *  This code maps large physical mmap regions into the
4762 *  processor address space.  Note that some shortcuts
4763 *  are taken, but the code works.
4764 */
4765void
4766pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4767    vm_pindex_t pindex, vm_size_t size)
4768{
4769	pt1_entry_t *pte1p;
4770	vm_paddr_t pa, pte2_pa;
4771	vm_page_t p;
4772	vm_memattr_t pat_mode;
4773	u_int l1attr, l1prot;
4774
4775	VM_OBJECT_ASSERT_WLOCKED(object);
4776	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4777	    ("%s: non-device object", __func__));
4778	if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) {
4779		if (!vm_object_populate(object, pindex, pindex + atop(size)))
4780			return;
4781		p = vm_page_lookup(object, pindex);
4782		KASSERT(p->valid == VM_PAGE_BITS_ALL,
4783		    ("%s: invalid page %p", __func__, p));
4784		pat_mode = p->md.pat_mode;
4785
4786		/*
4787		 * Abort the mapping if the first page is not physically
4788		 * aligned to a 1MB page boundary.
4789		 */
4790		pte2_pa = VM_PAGE_TO_PHYS(p);
4791		if (pte2_pa & PTE1_OFFSET)
4792			return;
4793
4794		/*
4795		 * Skip the first page. Abort the mapping if the rest of
4796		 * the pages are not physically contiguous or have differing
4797		 * memory attributes.
4798		 */
4799		p = TAILQ_NEXT(p, listq);
4800		for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size;
4801		    pa += PAGE_SIZE) {
4802			KASSERT(p->valid == VM_PAGE_BITS_ALL,
4803			    ("%s: invalid page %p", __func__, p));
4804			if (pa != VM_PAGE_TO_PHYS(p) ||
4805			    pat_mode != p->md.pat_mode)
4806				return;
4807			p = TAILQ_NEXT(p, listq);
4808		}
4809
4810		/*
4811		 * Map using 1MB pages.
4812		 *
4813		 * QQQ: We are mapping a section, so the same conditions must
4814		 *      hold as during promotion. It looks like only RW mappings
4815		 *      are done here, so RO mappings must be done elsewhere.
4816		 */
4817		l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A;
4818		l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode));
4819		PMAP_LOCK(pmap);
4820		for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) {
4821			pte1p = pmap_pte1(pmap, addr);
4822			if (!pte1_is_valid(pte1_load(pte1p))) {
4823				pte1_store(pte1p, PTE1(pa, l1prot, l1attr));
4824				pmap->pm_stats.resident_count += PTE1_SIZE /
4825				    PAGE_SIZE;
4826				pmap_pte1_mappings++;
4827			}
4828			/* Else continue on if the PTE1 is already valid. */
4829			addr += PTE1_SIZE;
4830		}
4831		PMAP_UNLOCK(pmap);
4832	}
4833}
4834
4835/*
4836 *  Do the things to protect a 1mpage in a process.
4837 */
4838static void
4839pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
4840    vm_prot_t prot)
4841{
4842	pt1_entry_t npte1, opte1;
4843	vm_offset_t eva, va;
4844	vm_page_t m;
4845
4846	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4847	KASSERT((sva & PTE1_OFFSET) == 0,
4848	    ("%s: sva is not 1mpage aligned", __func__));
4849
4850	opte1 = npte1 = pte1_load(pte1p);
4851	if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) {
4852		eva = sva + PTE1_SIZE;
4853		for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
4854		    va < eva; va += PAGE_SIZE, m++)
4855			vm_page_dirty(m);
4856	}
4857	if ((prot & VM_PROT_WRITE) == 0)
4858		npte1 |= PTE1_RO | PTE1_NM;
4859	if ((prot & VM_PROT_EXECUTE) == 0)
4860		npte1 |= PTE1_NX;
4861
4862	/*
4863	 * QQQ: Herein, execute permission is never set.
4864	 *      It only can be cleared. So, no icache
4865	 *      It can only be cleared. So, no icache
4866	 */
4867
4868	if (npte1 != opte1) {
4869		pte1_store(pte1p, npte1);
4870		pmap_tlb_flush(pmap, sva);
4871	}
4872}
4873
4874/*
4875 *	Set the physical protection on the
4876 *	specified range of this map as requested.
4877 */
4878void
4879pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4880{
4881	boolean_t pv_lists_locked;
4882	vm_offset_t nextva;
4883	pt1_entry_t *pte1p, pte1;
4884	pt2_entry_t *pte2p, opte2, npte2;
4885
4886	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4887	if (prot == VM_PROT_NONE) {
4888		pmap_remove(pmap, sva, eva);
4889		return;
4890	}
4891
4892	if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
4893	    (VM_PROT_WRITE | VM_PROT_EXECUTE))
4894		return;
4895
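	/*
	 * Accessing a non-current pmap through pmap_pte2_quick() requires
	 * the pv global lock and a pinned CPU, so take them up front in
	 * that case.  For the current pmap they are only needed if a 1MB
	 * mapping has to be demoted, and are taken lazily then.
	 */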
4896	if (pmap_is_current(pmap))
4897		pv_lists_locked = FALSE;
4898	else {
4899		pv_lists_locked = TRUE;
4900resume:
4901		rw_wlock(&pvh_global_lock);
4902		sched_pin();
4903	}
4904
4905	PMAP_LOCK(pmap);
4906	for (; sva < eva; sva = nextva) {
4907		/*
4908		 * Calculate address for next L2 page table.
4909		 */
4910		nextva = pte1_trunc(sva + PTE1_SIZE);
4911		if (nextva < sva)
4912			nextva = eva;
4913
4914		pte1p = pmap_pte1(pmap, sva);
4915		pte1 = pte1_load(pte1p);
4916
4917		/*
4918		 * Weed out invalid mappings. Note: we assume that L1 page
4919		 * Weed out invalid mappings. Note: we assume that the L1 page
4920		 * table is always allocated, and in kernel virtual.
4921		if (pte1 == 0)
4922			continue;
4923
4924		if (pte1_is_section(pte1)) {
4925			/*
4926			 * Are we protecting the entire large page?  If not,
4927			 * demote the mapping and fall through.
4928			 */
4929			if (sva + PTE1_SIZE == nextva && eva >= nextva) {
4930				pmap_protect_pte1(pmap, pte1p, sva, prot);
4931				continue;
4932			} else {
4933				if (!pv_lists_locked) {
4934					pv_lists_locked = TRUE;
4935					if (!rw_try_wlock(&pvh_global_lock)) {
4936						PMAP_UNLOCK(pmap);
4937						goto resume;
4938					}
4939					sched_pin();
4940				}
4941				if (!pmap_demote_pte1(pmap, pte1p, sva)) {
4942					/*
4943					 * The large page mapping
4944					 * was destroyed.
4945					 */
4946					continue;
4947				}
4948#ifdef INVARIANTS
4949				else {
4950					/* Update pte1 after demotion */
4951					pte1 = pte1_load(pte1p);
4952				}
4953#endif
4954			}
4955		}
4956
4957		KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
4958		    " is not link", __func__, pmap, sva, pte1, pte1p));
4959
4960		/*
4961		 * Limit our scan to either the end of the va represented
4962		 * by the current L2 page table page, or to the end of the
4963		 * range being protected.
4964		 */
4965		if (nextva > eva)
4966			nextva = eva;
4967
4968		for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
4969		    sva += PAGE_SIZE) {
4970			vm_page_t m;
4971
4972			opte2 = npte2 = pte2_load(pte2p);
4973			if (!pte2_is_valid(opte2))
4974				continue;
4975
4976			if ((prot & VM_PROT_WRITE) == 0) {
4977				if (pte2_is_managed(opte2) &&
4978				    pte2_is_dirty(opte2)) {
4979					m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
4980					vm_page_dirty(m);
4981				}
4982				npte2 |= PTE2_RO | PTE2_NM;
4983			}
4984
4985			if ((prot & VM_PROT_EXECUTE) == 0)
4986				npte2 |= PTE2_NX;
4987
4988			/*
4989			 * QQQ: Herein, execute permission is never set.
4990			 *      It can only be cleared. So, no icache
4991			 *      syncing is needed.
4992			 */
4993
4994			if (npte2 != opte2) {
4995				pte2_store(pte2p, npte2);
4996				pmap_tlb_flush(pmap, sva);
4997			}
4998		}
4999	}
5000	if (pv_lists_locked) {
5001		sched_unpin();
5002		rw_wunlock(&pvh_global_lock);
5003	}
5004	PMAP_UNLOCK(pmap);
5005}
5006
5007/*
5008 *	pmap_pvh_wired_mappings:
5009 *
5010 *	Return the updated number "count" of managed mappings that are wired.
5011 */
5012static int
5013pmap_pvh_wired_mappings(struct md_page *pvh, int count)
5014{
5015	pmap_t pmap;
5016	pt1_entry_t pte1;
5017	pt2_entry_t pte2;
5018	pv_entry_t pv;
5019
5020	rw_assert(&pvh_global_lock, RA_WLOCKED);
5021	sched_pin();
5022	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5023		pmap = PV_PMAP(pv);
5024		PMAP_LOCK(pmap);
5025		pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5026		if (pte1_is_section(pte1)) {
5027			if (pte1_is_wired(pte1))
5028				count++;
5029		} else {
5030			KASSERT(pte1_is_link(pte1),
5031			    ("%s: pte1 %#x is not link", __func__, pte1));
5032			pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5033			if (pte2_is_wired(pte2))
5034				count++;
5035		}
5036		PMAP_UNLOCK(pmap);
5037	}
5038	sched_unpin();
5039	return (count);
5040}
5041
5042/*
5043 *	pmap_page_wired_mappings:
5044 *
5045 *	Return the number of managed mappings to the given physical page
5046 *	that are wired.
5047 */
5048int
5049pmap_page_wired_mappings(vm_page_t m)
5050{
5051	int count;
5052
5053	count = 0;
5054	if ((m->oflags & VPO_UNMANAGED) != 0)
5055		return (count);
5056	rw_wlock(&pvh_global_lock);
5057	count = pmap_pvh_wired_mappings(&m->md, count);
5058	if ((m->flags & PG_FICTITIOUS) == 0) {
5059		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
5060		    count);
5061	}
5062	rw_wunlock(&pvh_global_lock);
5063	return (count);
5064}
5065
5066/*
5067 *  Returns TRUE if any of the given mappings were used to modify
5068 *  physical memory.  Otherwise, returns FALSE.  Both page and 1mpage
5069 *  mappings are supported.
5070 */
5071static boolean_t
5072pmap_is_modified_pvh(struct md_page *pvh)
5073{
5074	pv_entry_t pv;
5075	pt1_entry_t pte1;
5076	pt2_entry_t pte2;
5077	pmap_t pmap;
5078	boolean_t rv;
5079
5080	rw_assert(&pvh_global_lock, RA_WLOCKED);
5081	rv = FALSE;
5082	sched_pin();
5083	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5084		pmap = PV_PMAP(pv);
5085		PMAP_LOCK(pmap);
5086		pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5087		if (pte1_is_section(pte1)) {
5088			rv = pte1_is_dirty(pte1);
5089		} else {
5090			KASSERT(pte1_is_link(pte1),
5091			    ("%s: pte1 %#x is not link", __func__, pte1));
5092			pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5093			rv = pte2_is_dirty(pte2);
5094		}
5095		PMAP_UNLOCK(pmap);
5096		if (rv)
5097			break;
5098	}
5099	sched_unpin();
5100	return (rv);
5101}
5102
5103/*
5104 *	pmap_is_modified:
5105 *
5106 *	Return whether or not the specified physical page was modified
5107 *	in any physical maps.
5108 */
5109boolean_t
5110pmap_is_modified(vm_page_t m)
5111{
5112	boolean_t rv;
5113
5114	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5115	    ("%s: page %p is not managed", __func__, m));
5116
5117	/*
5118	 * If the page is not exclusively busied, then PGA_WRITEABLE cannot be
5119	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
5120	 * is clear, no PTE2s can be dirty (modified).
5121	 */
5122	VM_OBJECT_ASSERT_WLOCKED(m->object);
5123	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5124		return (FALSE);
5125	rw_wlock(&pvh_global_lock);
5126	rv = pmap_is_modified_pvh(&m->md) ||
5127	    ((m->flags & PG_FICTITIOUS) == 0 &&
5128	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5129	rw_wunlock(&pvh_global_lock);
5130	return (rv);
5131}
5132
5133/*
5134 *	pmap_is_prefaultable:
5135 *
5136 *	Return whether or not the specified virtual address is eligible
5137 *	for prefault.
5138 */
5139boolean_t
5140pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5141{
5142	pt1_entry_t pte1;
5143	pt2_entry_t pte2;
5144	boolean_t rv;
5145
5146	rv = FALSE;
5147	PMAP_LOCK(pmap);
5148	pte1 = pte1_load(pmap_pte1(pmap, addr));
5149	if (pte1_is_link(pte1)) {
5150		pte2 = pte2_load(pt2map_entry(addr));
5151		rv = !pte2_is_valid(pte2);
5152	}
5153	PMAP_UNLOCK(pmap);
5154	return (rv);
5155}
5156
5157/*
5158 *  Returns TRUE if any of the given mappings were referenced and FALSE
5159 *  otherwise. Both page and 1mpage mappings are supported.
5160 */
5161static boolean_t
5162pmap_is_referenced_pvh(struct md_page *pvh)
5163{
5164
5165	pv_entry_t pv;
5166	pt1_entry_t pte1;
5167	pt2_entry_t pte2;
5168	pmap_t pmap;
5169	boolean_t rv;
5170
5171	rw_assert(&pvh_global_lock, RA_WLOCKED);
5172	rv = FALSE;
5173	sched_pin();
5174	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5175		pmap = PV_PMAP(pv);
5176		PMAP_LOCK(pmap);
5177		pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5178		if (pte1_is_section(pte1)) {
5179			rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V);
5180		} else {
5181			pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5182			rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V);
5183		}
5184		PMAP_UNLOCK(pmap);
5185		if (rv)
5186			break;
5187	}
5188	sched_unpin();
5189	return (rv);
5190}
5191
5192/*
5193 *	pmap_is_referenced:
5194 *
5195 *	Return whether or not the specified physical page was referenced
5196 *	in any physical maps.
5197 */
5198boolean_t
5199pmap_is_referenced(vm_page_t m)
5200{
5201	boolean_t rv;
5202
5203	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5204	    ("%s: page %p is not managed", __func__, m));
5205	rw_wlock(&pvh_global_lock);
5206	rv = pmap_is_referenced_pvh(&m->md) ||
5207	    ((m->flags & PG_FICTITIOUS) == 0 &&
5208	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5209	rw_wunlock(&pvh_global_lock);
5210	return (rv);
5211}
5212
5213/*
5214 *	pmap_ts_referenced:
5215 *
5216 *	Return a count of reference bits for a page, clearing those bits.
5217 *	It is not necessary for every reference bit to be cleared, but it
5218 *	is necessary that 0 only be returned when there are truly no
5219 *	reference bits set.
5220 *
5221 *	As an optimization, update the page's dirty field if a modified bit is
5222 *	found while counting reference bits.  This opportunistic update can be
5223 *	performed at low cost and can eliminate the need for some future calls
5224 *	to pmap_is_modified().  However, since this function stops after
5225 *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5226 *	dirty pages.  Those dirty pages will only be detected by a future call
5227 *	to pmap_is_modified().
5228 */
5229int
5230pmap_ts_referenced(vm_page_t m)
5231{
5232	struct md_page *pvh;
5233	pv_entry_t pv, pvf;
5234	pmap_t pmap;
5235	pt1_entry_t  *pte1p, opte1;
5236	pt2_entry_t *pte2p, opte2;
5237	vm_paddr_t pa;
5238	int rtval = 0;
5239
5240	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5241	    ("%s: page %p is not managed", __func__, m));
5242	pa = VM_PAGE_TO_PHYS(m);
5243	pvh = pa_to_pvh(pa);
5244	rw_wlock(&pvh_global_lock);
5245	sched_pin();
5246	if ((m->flags & PG_FICTITIOUS) != 0 ||
5247	    (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5248		goto small_mappings;
5249	pv = pvf;
5250	do {
5251		pmap = PV_PMAP(pv);
5252		PMAP_LOCK(pmap);
5253		pte1p = pmap_pte1(pmap, pv->pv_va);
5254		opte1 = pte1_load(pte1p);
5255		if (pte1_is_dirty(opte1)) {
5256			/*
5257			 * Although "opte1" is mapping a 1MB page, because
5258			 * this function is called at a 4KB page granularity,
5259			 * we only update the 4KB page under test.
5260			 */
5261			vm_page_dirty(m);
5262		}
5263		if ((opte1 & PTE1_A) != 0) {
5264			/*
5265			 * Since this reference bit is shared by 256 4KB pages,
5266			 * it should not be cleared every time it is tested.
5267			 * Apply a simple "hash" function on the physical page
5268			 * number, the virtual section number, and the pmap
5269			 * address to select one 4KB page out of the 256
5270			 * on which testing the reference bit will result
5271			 * in clearing that bit. This function is designed
5272			 * to avoid the selection of the same 4KB page
5273			 * for every 1MB page mapping.
5274			 *
5275			 * On demotion, a mapping that hasn't been referenced
5276			 * is simply destroyed.  To avoid the possibility of a
5277			 * subsequent page fault on a demoted wired mapping,
5278			 * always leave its reference bit set.  Moreover,
5279			 * since the section is wired, the current state of
5280			 * its reference bit won't affect page replacement.
5281			 */
5282			 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^
5283			    (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 &&
5284			    !pte1_is_wired(opte1)) {
5285				pte1_clear_bit(pte1p, PTE1_A);
5286				pmap_tlb_flush(pmap, pv->pv_va);
5287			}
5288			rtval++;
5289		}
5290		PMAP_UNLOCK(pmap);
5291		/* Rotate the PV list if it has more than one entry. */
5292		if (TAILQ_NEXT(pv, pv_next) != NULL) {
5293			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5294			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5295		}
5296		if (rtval >= PMAP_TS_REFERENCED_MAX)
5297			goto out;
5298	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5299small_mappings:
5300	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5301		goto out;
5302	pv = pvf;
5303	do {
5304		pmap = PV_PMAP(pv);
5305		PMAP_LOCK(pmap);
5306		pte1p = pmap_pte1(pmap, pv->pv_va);
5307		KASSERT(pte1_is_link(pte1_load(pte1p)),
5308		    ("%s: not found a link in page %p's pv list", __func__, m));
5309
5310		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
5311		opte2 = pte2_load(pte2p);
5312		if (pte2_is_dirty(opte2))
5313			vm_page_dirty(m);
5314		if ((opte2 & PTE2_A) != 0) {
5315			pte2_clear_bit(pte2p, PTE2_A);
5316			pmap_tlb_flush(pmap, pv->pv_va);
5317			rtval++;
5318		}
5319		PMAP_UNLOCK(pmap);
5320		/* Rotate the PV list if it has more than one entry. */
5321		if (TAILQ_NEXT(pv, pv_next) != NULL) {
5322			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5323			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5324		}
5325	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
5326	    PMAP_TS_REFERENCED_MAX);
5327out:
5328	sched_unpin();
5329	rw_wunlock(&pvh_global_lock);
5330	return (rtval);
5331}
5332
5333/*
5334 *	Clear the wired attribute from the mappings for the specified range of
5335 *	addresses in the given pmap.  Every valid mapping within that range
5336 *	must have the wired attribute set.  In contrast, invalid mappings
5337 *	cannot have the wired attribute set, so they are ignored.
5338 *
5339 *	The wired attribute of the page table entry is not a hardware feature,
5340 *	so there is no need to invalidate any TLB entries.
5341 */
5342void
5343pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5344{
5345	vm_offset_t nextva;
5346	pt1_entry_t *pte1p, pte1;
5347	pt2_entry_t *pte2p, pte2;
5348	boolean_t pv_lists_locked;
5349
5350	if (pmap_is_current(pmap))
5351		pv_lists_locked = FALSE;
5352	else {
5353		pv_lists_locked = TRUE;
5354resume:
5355		rw_wlock(&pvh_global_lock);
5356		sched_pin();
5357	}
5358	PMAP_LOCK(pmap);
5359	for (; sva < eva; sva = nextva) {
5360		nextva = pte1_trunc(sva + PTE1_SIZE);
5361		if (nextva < sva)
5362			nextva = eva;
5363
5364		pte1p = pmap_pte1(pmap, sva);
5365		pte1 = pte1_load(pte1p);
5366
5367		/*
5368		 * Weed out invalid mappings. Note: we assume that the L1 page
5369		 * table is always allocated and is in kernel virtual memory.
5370		 */
5371		if (pte1 == 0)
5372			continue;
5373
5374		if (pte1_is_section(pte1)) {
5375			if (!pte1_is_wired(pte1))
5376				panic("%s: pte1 %#x not wired", __func__, pte1);
5377
5378			/*
5379			 * Are we unwiring the entire large page?  If not,
5380			 * demote the mapping and fall through.
5381			 */
5382			if (sva + PTE1_SIZE == nextva && eva >= nextva) {
5383				pte1_clear_bit(pte1p, PTE1_W);
5384				pmap->pm_stats.wired_count -= PTE1_SIZE /
5385				    PAGE_SIZE;
5386				continue;
5387			} else {
5388				if (!pv_lists_locked) {
5389					pv_lists_locked = TRUE;
5390					if (!rw_try_wlock(&pvh_global_lock)) {
5391						PMAP_UNLOCK(pmap);
5392						/* Repeat sva. */
5393						goto resume;
5394					}
5395					sched_pin();
5396				}
5397				if (!pmap_demote_pte1(pmap, pte1p, sva))
5398					panic("%s: demotion failed", __func__);
5399#ifdef INVARIANTS
5400				else {
5401					/* Update pte1 after demotion */
5402					pte1 = pte1_load(pte1p);
5403				}
5404#endif
5405			}
5406		}
5407
5408		KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
5409		    " is not link", __func__, pmap, sva, pte1, pte1p));
5410
5411		/*
5412		 * Limit our scan to either the end of the va represented
5413		 * by the current L2 page table page, or to the end of the
5414		 * range being unwired.
5415		 */
5416		if (nextva > eva)
5417			nextva = eva;
5418
5419		for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
5420		    sva += PAGE_SIZE) {
5421			pte2 = pte2_load(pte2p);
5422			if (!pte2_is_valid(pte2))
5423				continue;
5424			if (!pte2_is_wired(pte2))
5425				panic("%s: pte2 %#x is missing PTE2_W",
5426				    __func__, pte2);
5427
5428			/*
5429			 * PTE2_W must be cleared atomically. Although the pmap
5430			 * lock synchronizes access to PTE2_W, another processor
5431			 * could be changing PTE2_NM and/or PTE2_A concurrently.
5432			 */
5433			pte2_clear_bit(pte2p, PTE2_W);
5434			pmap->pm_stats.wired_count--;
5435		}
5436	}
5437	if (pv_lists_locked) {
5438		sched_unpin();
5439		rw_wunlock(&pvh_global_lock);
5440	}
5441	PMAP_UNLOCK(pmap);
5442}
5443
5444/*
5445 *  Clear the write and modified bits in each of the given page's mappings.
5446 */
5447void
5448pmap_remove_write(vm_page_t m)
5449{
5450	struct md_page *pvh;
5451	pv_entry_t next_pv, pv;
5452	pmap_t pmap;
5453	pt1_entry_t *pte1p;
5454	pt2_entry_t *pte2p, opte2;
5455	vm_offset_t va;
5456
5457	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5458	    ("%s: page %p is not managed", __func__, m));
5459
5460	/*
5461	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5462	 * set by another thread while the object is locked.  Thus,
5463	 * if PGA_WRITEABLE is clear, no page table entries need updating.
5464	 */
5465	VM_OBJECT_ASSERT_WLOCKED(m->object);
5466	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5467		return;
5468	rw_wlock(&pvh_global_lock);
5469	sched_pin();
5470	if ((m->flags & PG_FICTITIOUS) != 0)
5471		goto small_mappings;
5472	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5473	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5474		va = pv->pv_va;
5475		pmap = PV_PMAP(pv);
5476		PMAP_LOCK(pmap);
5477		pte1p = pmap_pte1(pmap, va);
5478		if (!(pte1_load(pte1p) & PTE1_RO))
5479			(void)pmap_demote_pte1(pmap, pte1p, va);
5480		PMAP_UNLOCK(pmap);
5481	}
5482small_mappings:
5483	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5484		pmap = PV_PMAP(pv);
5485		PMAP_LOCK(pmap);
5486		pte1p = pmap_pte1(pmap, pv->pv_va);
5487		KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
5488		    " a section in page %p's pv list", __func__, m));
5489		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
5490		opte2 = pte2_load(pte2p);
5491		if (!(opte2 & PTE2_RO)) {
5492			pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM);
5493			if (pte2_is_dirty(opte2))
5494				vm_page_dirty(m);
5495			pmap_tlb_flush(pmap, pv->pv_va);
5496		}
5497		PMAP_UNLOCK(pmap);
5498	}
5499	vm_page_aflag_clear(m, PGA_WRITEABLE);
5500	sched_unpin();
5501	rw_wunlock(&pvh_global_lock);
5502}
5503
5504/*
5505 *	Apply the given advice to the specified range of addresses within the
5506 *	given pmap.  Depending on the advice, clear the referenced and/or
5507 *	modified flags in each mapping and set the mapped page's dirty field.
5508 */
5509void
5510pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
5511{
5512	pt1_entry_t *pte1p, opte1;
5513	pt2_entry_t *pte2p, pte2;
5514	vm_offset_t pdnxt;
5515	vm_page_t m;
5516	boolean_t pv_lists_locked;
5517
5518	if (advice != MADV_DONTNEED && advice != MADV_FREE)
5519		return;
5520	if (pmap_is_current(pmap))
5521		pv_lists_locked = FALSE;
5522	else {
5523		pv_lists_locked = TRUE;
5524resume:
5525		rw_wlock(&pvh_global_lock);
5526		sched_pin();
5527	}
5528	PMAP_LOCK(pmap);
5529	for (; sva < eva; sva = pdnxt) {
5530		pdnxt = pte1_trunc(sva + PTE1_SIZE);
5531		if (pdnxt < sva)
5532			pdnxt = eva;
5533		pte1p = pmap_pte1(pmap, sva);
5534		opte1 = pte1_load(pte1p);
5535		if (!pte1_is_valid(opte1)) /* XXX */
5536			continue;
5537		else if (pte1_is_section(opte1)) {
5538			if (!pte1_is_managed(opte1))
5539				continue;
5540			if (!pv_lists_locked) {
5541				pv_lists_locked = TRUE;
5542				if (!rw_try_wlock(&pvh_global_lock)) {
5543					PMAP_UNLOCK(pmap);
5544					goto resume;
5545				}
5546				sched_pin();
5547			}
5548			if (!pmap_demote_pte1(pmap, pte1p, sva)) {
5549				/*
5550				 * The large page mapping was destroyed.
5551				 */
5552				continue;
5553			}
5554
5555			/*
5556			 * Unless the page mappings are wired, remove the
5557			 * mapping to a single page so that a subsequent
5558			 * access may repromote.  Since the underlying L2 page
5559			 * table is fully populated, this removal never
5560			 * frees an L2 page table page.
5561			 */
5562			if (!pte1_is_wired(opte1)) {
5563				pte2p = pmap_pte2_quick(pmap, sva);
5564				KASSERT(pte2_is_valid(pte2_load(pte2p)),
5565				    ("%s: invalid PTE2", __func__));
5566				pmap_remove_pte2(pmap, pte2p, sva, NULL);
5567			}
5568		}
5569		if (pdnxt > eva)
5570			pdnxt = eva;
5571		for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++,
5572		    sva += PAGE_SIZE) {
5573			pte2 = pte2_load(pte2p);
5574			if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2))
5575				continue;
5576			else if (pte2_is_dirty(pte2)) {
5577				if (advice == MADV_DONTNEED) {
5578					/*
5579					 * Future calls to pmap_is_modified()
5580					 * can be avoided by making the page
5581					 * dirty now.
5582					 */
5583					m = PHYS_TO_VM_PAGE(pte2_pa(pte2));
5584					vm_page_dirty(m);
5585				}
5586				pte2_set_bit(pte2p, PTE2_NM);
5587				pte2_clear_bit(pte2p, PTE2_A);
5588			} else if ((pte2 & PTE2_A) != 0)
5589				pte2_clear_bit(pte2p, PTE2_A);
5590			else
5591				continue;
5592			pmap_tlb_flush(pmap, sva);
5593		}
5594	}
5595	if (pv_lists_locked) {
5596		sched_unpin();
5597		rw_wunlock(&pvh_global_lock);
5598	}
5599	PMAP_UNLOCK(pmap);
5600}
5601
5602/*
5603 *	Clear the modify bits on the specified physical page.
5604 */
5605void
5606pmap_clear_modify(vm_page_t m)
5607{
5608	struct md_page *pvh;
5609	pv_entry_t next_pv, pv;
5610	pmap_t pmap;
5611	pt1_entry_t *pte1p, opte1;
5612	pt2_entry_t *pte2p, opte2;
5613	vm_offset_t va;
5614
5615	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5616	    ("%s: page %p is not managed", __func__, m));
5617	VM_OBJECT_ASSERT_WLOCKED(m->object);
5618	KASSERT(!vm_page_xbusied(m),
5619	    ("%s: page %p is exclusive busy", __func__, m));
5620
5621	/*
5622	 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM
5623	 * cleared. If the object containing the page is locked and the page
5624	 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently
5625	 * set.
5626	 */
5627	if ((m->aflags & PGA_WRITEABLE) == 0)
5628		return;
5629	rw_wlock(&pvh_global_lock);
5630	sched_pin();
5631	if ((m->flags & PG_FICTITIOUS) != 0)
5632		goto small_mappings;
5633	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5634	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5635		va = pv->pv_va;
5636		pmap = PV_PMAP(pv);
5637		PMAP_LOCK(pmap);
5638		pte1p = pmap_pte1(pmap, va);
5639		opte1 = pte1_load(pte1p);
5640		if (!(opte1 & PTE1_RO)) {
5641			if (pmap_demote_pte1(pmap, pte1p, va) &&
5642			    !pte1_is_wired(opte1)) {
5643				/*
5644				 * Write protect the mapping to a
5645				 * single page so that a subsequent
5646				 * write access may repromote.
5647				 */
5648				va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1);
5649				pte2p = pmap_pte2_quick(pmap, va);
5650				opte2 = pte2_load(pte2p);
5651				if ((opte2 & PTE2_V)) {
5652					pte2_set_bit(pte2p, PTE2_NM | PTE2_RO);
5653					vm_page_dirty(m);
5654					pmap_tlb_flush(pmap, va);
5655				}
5656			}
5657		}
5658		PMAP_UNLOCK(pmap);
5659	}
5660small_mappings:
5661	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5662		pmap = PV_PMAP(pv);
5663		PMAP_LOCK(pmap);
5664		pte1p = pmap_pte1(pmap, pv->pv_va);
5665		KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
5666		    " a section in page %p's pv list", __func__, m));
5667		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
5668		if (pte2_is_dirty(pte2_load(pte2p))) {
5669			pte2_set_bit(pte2p, PTE2_NM);
5670			pmap_tlb_flush(pmap, pv->pv_va);
5671		}
5672		PMAP_UNLOCK(pmap);
5673	}
5674	sched_unpin();
5675	rw_wunlock(&pvh_global_lock);
5676}
5677
5678
5679/*
5680 *  Sets the memory attribute for the specified page.
5681 */
5682void
5683pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5684{
5685	pt2_entry_t *cmap2_pte2p;
5686	vm_memattr_t oma;
5687	vm_paddr_t pa;
5688	struct pcpu *pc;
5689
5690	oma = m->md.pat_mode;
5691	m->md.pat_mode = ma;
5692
5693	CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m,
5694	    VM_PAGE_TO_PHYS(m), oma, ma);
5695	if ((m->flags & PG_FICTITIOUS) != 0)
5696		return;
5697#if 0
5698	/*
5699	 * If "m" is a normal page, flush it from the cache.
5700	 *
5701	 * First, try to find an existing mapping of the page by sf
5702	 * buffer. sf_buf_invalidate_cache() modifies mapping and
5703	 * flushes the cache.
5704	 */
5705	if (sf_buf_invalidate_cache(m, oma))
5706		return;
5707#endif
5708	/*
5709	 * If the page is not mapped by an sf buffer, map the page
5710	 * transiently and do the invalidation.
5711	 */
5712	if (ma != oma) {
5713		pa = VM_PAGE_TO_PHYS(m);
5714		sched_pin();
5715		pc = get_pcpu();
5716		cmap2_pte2p = pc->pc_cmap2_pte2p;
5717		mtx_lock(&pc->pc_cmap_lock);
5718		if (pte2_load(cmap2_pte2p) != 0)
5719			panic("%s: CMAP2 busy", __func__);
5720		pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
5721		    vm_memattr_to_pte2(ma)));
5722		dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
5723		pte2_clear(cmap2_pte2p);
5724		tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
5725		sched_unpin();
5726		mtx_unlock(&pc->pc_cmap_lock);
5727	}
5728}
5729
5730/*
5731 *  Miscellaneous support routines follow
5732 */
5733
5734/*
5735 *  Returns TRUE if the given page is mapped individually or as part of
5736 *  a 1mpage.  Otherwise, returns FALSE.
5737 */
5738boolean_t
5739pmap_page_is_mapped(vm_page_t m)
5740{
5741	boolean_t rv;
5742
5743	if ((m->oflags & VPO_UNMANAGED) != 0)
5744		return (FALSE);
5745	rw_wlock(&pvh_global_lock);
5746	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
5747	    ((m->flags & PG_FICTITIOUS) == 0 &&
5748	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
5749	rw_wunlock(&pvh_global_lock);
5750	return (rv);
5751}
5752
5753/*
5754 *  Returns true if the pmap's pv is one of the first
5755 *  16 pvs linked to from this page.  This count may
5756 *  be changed upwards or downwards in the future; it
5757 *  is only necessary that true be returned for a small
5758 *  subset of pmaps for proper page aging.
5759 */
5760boolean_t
5761pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5762{
5763	struct md_page *pvh;
5764	pv_entry_t pv;
5765	int loops = 0;
5766	boolean_t rv;
5767
5768	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5769	    ("%s: page %p is not managed", __func__, m));
5770	rv = FALSE;
5771	rw_wlock(&pvh_global_lock);
5772	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5773		if (PV_PMAP(pv) == pmap) {
5774			rv = TRUE;
5775			break;
5776		}
5777		loops++;
5778		if (loops >= 16)
5779			break;
5780	}
5781	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
5782		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5783		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5784			if (PV_PMAP(pv) == pmap) {
5785				rv = TRUE;
5786				break;
5787			}
5788			loops++;
5789			if (loops >= 16)
5790				break;
5791		}
5792	}
5793	rw_wunlock(&pvh_global_lock);
5794	return (rv);
5795}
5796
5797/*
5798 *	pmap_zero_page zeros the specified hardware page by mapping
5799 *	the page into KVM and using bzero to clear its contents.
5800 */
5801void
5802pmap_zero_page(vm_page_t m)
5803{
5804	pt2_entry_t *cmap2_pte2p;
5805	struct pcpu *pc;
5806
5807	sched_pin();
5808	pc = get_pcpu();
5809	cmap2_pte2p = pc->pc_cmap2_pte2p;
5810	mtx_lock(&pc->pc_cmap_lock);
5811	if (pte2_load(cmap2_pte2p) != 0)
5812		panic("%s: CMAP2 busy", __func__);
5813	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
5814	    vm_page_pte2_attr(m)));
5815	pagezero(pc->pc_cmap2_addr);
5816	pte2_clear(cmap2_pte2p);
5817	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
5818	sched_unpin();
5819	mtx_unlock(&pc->pc_cmap_lock);
5820}
5821
5822/*
5823 *	pmap_zero_page_area zeros the specified hardware page by mapping
5824 *	the page into KVM and using bzero to clear its contents.
5825 *
5826 *	off and size may not cover an area beyond a single hardware page.
5827 */
5828void
5829pmap_zero_page_area(vm_page_t m, int off, int size)
5830{
5831	pt2_entry_t *cmap2_pte2p;
5832	struct pcpu *pc;
5833
5834	sched_pin();
5835	pc = get_pcpu();
5836	cmap2_pte2p = pc->pc_cmap2_pte2p;
5837	mtx_lock(&pc->pc_cmap_lock);
5838	if (pte2_load(cmap2_pte2p) != 0)
5839		panic("%s: CMAP2 busy", __func__);
5840	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
5841	    vm_page_pte2_attr(m)));
5842	if (off == 0 && size == PAGE_SIZE)
5843		pagezero(pc->pc_cmap2_addr);
5844	else
5845		bzero(pc->pc_cmap2_addr + off, size);
5846	pte2_clear(cmap2_pte2p);
5847	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
5848	sched_unpin();
5849	mtx_unlock(&pc->pc_cmap_lock);
5850}
5851
5852/*
5853 *	pmap_zero_page_idle zeros the specified hardware page by mapping
5854 *	the page into KVM and using bzero to clear its contents.  This
5855 *	is intended to be called from the vm_pagezero process only and
5856 *	outside of Giant.
5857 */
5858void
5859pmap_zero_page_idle(vm_page_t m)
5860{
5861
5862	if (pte2_load(CMAP3) != 0)
5863		panic("%s: CMAP3 busy", __func__);
5864	sched_pin();
5865	pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
5866	    vm_page_pte2_attr(m)));
5867	pagezero(CADDR3);
5868	pte2_clear(CMAP3);
5869	tlb_flush((vm_offset_t)CADDR3);
5870	sched_unpin();
5871}
5872
5873/*
5874 *	pmap_copy_page copies the specified (machine independent)
5875 *	page by mapping the page into virtual memory and using
5876 *	bcopy to copy the page, one machine dependent page at a
5877 *	time.
5878 */
5879void
5880pmap_copy_page(vm_page_t src, vm_page_t dst)
5881{
5882	pt2_entry_t *cmap1_pte2p, *cmap2_pte2p;
5883	struct pcpu *pc;
5884
5885	sched_pin();
5886	pc = get_pcpu();
5887	cmap1_pte2p = pc->pc_cmap1_pte2p;
5888	cmap2_pte2p = pc->pc_cmap2_pte2p;
5889	mtx_lock(&pc->pc_cmap_lock);
5890	if (pte2_load(cmap1_pte2p) != 0)
5891		panic("%s: CMAP1 busy", __func__);
5892	if (pte2_load(cmap2_pte2p) != 0)
5893		panic("%s: CMAP2 busy", __func__);
5894	pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src),
5895	    PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src)));
5896	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst),
5897	    PTE2_AP_KRW, vm_page_pte2_attr(dst)));
5898	bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE);
5899	pte2_clear(cmap1_pte2p);
5900	tlb_flush((vm_offset_t)pc->pc_cmap1_addr);
5901	pte2_clear(cmap2_pte2p);
5902	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
5903	sched_unpin();
5904	mtx_unlock(&pc->pc_cmap_lock);
5905}
5906
5907int unmapped_buf_allowed = 1;
5908
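/*
 *  Copy "xfersize" bytes from the pages "ma[]", starting at byte offset
 *  "a_offset", to the pages "mb[]", starting at "b_offset", one page at a
 *  time through the per-CPU CMAP1/CMAP2 temporary kernel mappings.
 */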
5909void
5910pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
5911    vm_offset_t b_offset, int xfersize)
5912{
5913	pt2_entry_t *cmap1_pte2p, *cmap2_pte2p;
5914	vm_page_t a_pg, b_pg;
5915	char *a_cp, *b_cp;
5916	vm_offset_t a_pg_offset, b_pg_offset;
5917	struct pcpu *pc;
5918	int cnt;
5919
5920	sched_pin();
5921	pc = get_pcpu();
5922	cmap1_pte2p = pc->pc_cmap1_pte2p;
5923	cmap2_pte2p = pc->pc_cmap2_pte2p;
5924	mtx_lock(&pc->pc_cmap_lock);
5925	if (pte2_load(cmap1_pte2p) != 0)
5926		panic("pmap_copy_pages: CMAP1 busy");
5927	if (pte2_load(cmap2_pte2p) != 0)
5928		panic("pmap_copy_pages: CMAP2 busy");
5929	while (xfersize > 0) {
5930		a_pg = ma[a_offset >> PAGE_SHIFT];
5931		a_pg_offset = a_offset & PAGE_MASK;
5932		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
5933		b_pg = mb[b_offset >> PAGE_SHIFT];
5934		b_pg_offset = b_offset & PAGE_MASK;
5935		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
5936		pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg),
5937		    PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg)));
5938		tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr);
5939		pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg),
5940		    PTE2_AP_KRW, vm_page_pte2_attr(b_pg)));
5941		tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr);
5942		a_cp = pc->pc_cmap1_addr + a_pg_offset;
5943		b_cp = pc->pc_cmap2_addr + b_pg_offset;
5944		bcopy(a_cp, b_cp, cnt);
5945		a_offset += cnt;
5946		b_offset += cnt;
5947		xfersize -= cnt;
5948	}
5949	pte2_clear(cmap1_pte2p);
5950	tlb_flush((vm_offset_t)pc->pc_cmap1_addr);
5951	pte2_clear(cmap2_pte2p);
5952	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
5953	sched_unpin();
5954	mtx_unlock(&pc->pc_cmap_lock);
5955}
5956
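/*
 *  Map the given page into the per-CPU quick-map KVA slot for short-term
 *  access.  Preemption stays disabled (critical_enter()) until the matching
 *  pmap_quick_remove_page() call, so the slot cannot be reused concurrently.
 */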
5957vm_offset_t
5958pmap_quick_enter_page(vm_page_t m)
5959{
5960	struct pcpu *pc;
5961	pt2_entry_t *pte2p;
5962
5963	critical_enter();
5964	pc = get_pcpu();
5965	pte2p = pc->pc_qmap_pte2p;
5966
5967	KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__));
5968
5969	pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
5970	    vm_page_pte2_attr(m)));
5971	return (pc->pc_qmap_addr);
5972}
5973
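/*
 *  Tear down the temporary mapping created by pmap_quick_enter_page() and
 *  re-enable preemption.
 */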
5974void
5975pmap_quick_remove_page(vm_offset_t addr)
5976{
5977	struct pcpu *pc;
5978	pt2_entry_t *pte2p;
5979
5980	pc = get_pcpu();
5981	pte2p = pc->pc_qmap_pte2p;
5982
5983	KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__));
5984	KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__));
5985
5986	pte2_clear(pte2p);
5987	tlb_flush(pc->pc_qmap_addr);
5988	critical_exit();
5989}
5990
5991/*
5992 *	Copy the range specified by src_addr/len
5993 *	from the source map to the range dst_addr/len
5994 *	in the destination map.
5995 *
5996 *	This routine is only advisory and need not do anything.
5997 */
5998void
5999pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
6000    vm_offset_t src_addr)
6001{
6002	struct spglist free;
6003	vm_offset_t addr;
6004	vm_offset_t end_addr = src_addr + len;
6005	vm_offset_t nextva;
6006
6007	if (dst_addr != src_addr)
6008		return;
6009
6010	if (!pmap_is_current(src_pmap))
6011		return;
6012
6013	rw_wlock(&pvh_global_lock);
6014	if (dst_pmap < src_pmap) {
6015		PMAP_LOCK(dst_pmap);
6016		PMAP_LOCK(src_pmap);
6017	} else {
6018		PMAP_LOCK(src_pmap);
6019		PMAP_LOCK(dst_pmap);
6020	}
6021	sched_pin();
6022	for (addr = src_addr; addr < end_addr; addr = nextva) {
6023		pt2_entry_t *src_pte2p, *dst_pte2p;
6024		vm_page_t dst_mpt2pg, src_mpt2pg;
6025		pt1_entry_t src_pte1;
6026		u_int pte1_idx;
6027
6028		KASSERT(addr < VM_MAXUSER_ADDRESS,
6029		    ("%s: invalid to pmap_copy page tables", __func__));
6030
6031		nextva = pte1_trunc(addr + PTE1_SIZE);
6032		if (nextva < addr)
6033			nextva = end_addr;
6034
6035		pte1_idx = pte1_index(addr);
6036		src_pte1 = src_pmap->pm_pt1[pte1_idx];
6037		if (pte1_is_section(src_pte1)) {
6038			if ((addr & PTE1_OFFSET) != 0 ||
6039			    (addr + PTE1_SIZE) > end_addr)
6040				continue;
6041			if (dst_pmap->pm_pt1[pte1_idx] == 0 &&
6042			    (!pte1_is_managed(src_pte1) ||
6043			    pmap_pv_insert_pte1(dst_pmap, addr,
6044			    pte1_pa(src_pte1)))) {
6045				dst_pmap->pm_pt1[pte1_idx] = src_pte1 &
6046				    ~PTE1_W;
6047				dst_pmap->pm_stats.resident_count +=
6048				    PTE1_SIZE / PAGE_SIZE;
6049				pmap_pte1_mappings++;
6050			}
6051			continue;
6052		} else if (!pte1_is_link(src_pte1))
6053			continue;
6054
6055		src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1));
6056
6057		/*
6058		 * We leave PT2s to be linked from PT1 even if they are not
6059		 * referenced until all PT2s in a page are without reference.
6060		 *
6061		 * QQQ: It could be changed ...
6062		 */
6063#if 0 /* single_pt2_link_is_cleared */
6064		KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0,
6065		    ("%s: source page table page is unused", __func__));
6066#else
6067		if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0)
6068			continue;
6069#endif
6070		if (nextva > end_addr)
6071			nextva = end_addr;
6072
6073		src_pte2p = pt2map_entry(addr);
6074		while (addr < nextva) {
6075			pt2_entry_t temp_pte2;
6076			temp_pte2 = pte2_load(src_pte2p);
6077			/*
6078			 * We only virtually copy managed pages.
6079			 */
6080			if (pte2_is_managed(temp_pte2)) {
6081				dst_mpt2pg = pmap_allocpte2(dst_pmap, addr,
6082				    PMAP_ENTER_NOSLEEP);
6083				if (dst_mpt2pg == NULL)
6084					goto out;
6085				dst_pte2p = pmap_pte2_quick(dst_pmap, addr);
6086				if (!pte2_is_valid(pte2_load(dst_pte2p)) &&
6087				    pmap_try_insert_pv_entry(dst_pmap, addr,
6088				    PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) {
6089					/*
6090					 * Clear the wired, modified, and
6091					 * accessed (referenced) bits
6092					 * during the copy.
6093					 */
6094					temp_pte2 &=  ~(PTE2_W | PTE2_A);
6095					temp_pte2 |= PTE2_NM;
6096					pte2_store(dst_pte2p, temp_pte2);
6097					dst_pmap->pm_stats.resident_count++;
6098				} else {
6099					SLIST_INIT(&free);
6100					if (pmap_unwire_pt2(dst_pmap, addr,
6101					    dst_mpt2pg, &free)) {
6102						pmap_tlb_flush(dst_pmap, addr);
6103						pmap_free_zero_pages(&free);
6104					}
6105					goto out;
6106				}
6107				if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >=
6108				    pt2_wirecount_get(src_mpt2pg, pte1_idx))
6109					break;
6110			}
6111			addr += PAGE_SIZE;
6112			src_pte2p++;
6113		}
6114	}
6115out:
6116	sched_unpin();
6117	rw_wunlock(&pvh_global_lock);
6118	PMAP_UNLOCK(src_pmap);
6119	PMAP_UNLOCK(dst_pmap);
6120}
6121
6122/*
6123 *	Increase the starting virtual address of the given mapping if a
6124 *	different alignment might result in more section mappings.
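 *
 *	For example (illustration only): with 1 MB sections, an object whose
 *	mapping offset has low bits 0x40000 within a section is best mapped
 *	at a virtual address whose low bits are also 0x40000, so *addr is
 *	advanced to the nearest such address.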
6125 */
6126void
6127pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
6128    vm_offset_t *addr, vm_size_t size)
6129{
6130	vm_offset_t pte1_offset;
6131
6132	if (size < PTE1_SIZE)
6133		return;
6134	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
6135		offset += ptoa(object->pg_color);
6136	pte1_offset = offset & PTE1_OFFSET;
6137	if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE ||
6138	    (*addr & PTE1_OFFSET) == pte1_offset)
6139		return;
6140	if ((*addr & PTE1_OFFSET) < pte1_offset)
6141		*addr = pte1_trunc(*addr) + pte1_offset;
6142	else
6143		*addr = pte1_roundup(*addr) + pte1_offset;
6144}
6145
6146void
6147pmap_activate(struct thread *td)
6148{
6149	pmap_t pmap, oldpmap;
6150	u_int cpuid, ttb;
6151
6152	PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td));
6153
6154	critical_enter();
6155	pmap = vmspace_pmap(td->td_proc->p_vmspace);
6156	oldpmap = PCPU_GET(curpmap);
6157	cpuid = PCPU_GET(cpuid);
6158
6159#if defined(SMP)
6160	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
6161	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
6162#else
6163	CPU_CLR(cpuid, &oldpmap->pm_active);
6164	CPU_SET(cpuid, &pmap->pm_active);
6165#endif
6166
6167	ttb = pmap_ttb_get(pmap);
6168
6169	/*
6170	 * pmap_activate is for the current thread on the current cpu
6171	 */
6172	td->td_pcb->pcb_pagedir = ttb;
6173	cp15_ttbr_set(ttb);
6174	PCPU_SET(curpmap, pmap);
6175	critical_exit();
6176}
6177
6178/*
6179 *  Perform the pmap work for mincore.
6180 */
6181int
6182pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
6183{
6184	pt1_entry_t *pte1p, pte1;
6185	pt2_entry_t *pte2p, pte2;
6186	vm_paddr_t pa;
6187	bool managed;
6188	int val;
6189
6190	PMAP_LOCK(pmap);
6191retry:
6192	pte1p = pmap_pte1(pmap, addr);
6193	pte1 = pte1_load(pte1p);
6194	if (pte1_is_section(pte1)) {
6195		pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET));
6196		managed = pte1_is_managed(pte1);
6197		val = MINCORE_SUPER | MINCORE_INCORE;
6198		if (pte1_is_dirty(pte1))
6199			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
6200		if (pte1 & PTE1_A)
6201			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
6202	} else if (pte1_is_link(pte1)) {
6203		pte2p = pmap_pte2(pmap, addr);
6204		pte2 = pte2_load(pte2p);
6205		pmap_pte2_release(pte2p);
6206		pa = pte2_pa(pte2);
6207		managed = pte2_is_managed(pte2);
6208		val = MINCORE_INCORE;
6209		if (pte2_is_dirty(pte2))
6210			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
6211		if (pte2 & PTE2_A)
6212			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
6213	} else {
6214		managed = false;
6215		val = 0;
6216	}
6217	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
6218	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
6219		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
6220		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
6221			goto retry;
6222	} else
6223		PA_UNLOCK_COND(*locked_pa);
6224	PMAP_UNLOCK(pmap);
6225	return (val);
6226}
6227
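/*
 *  Map a page-aligned run of device (physical) memory into kernel virtual
 *  address space with the VM_MEMATTR_DEVICE attribute and flush the TLB for
 *  the whole range.
 */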
6228void
6229pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
6230{
6231	vm_offset_t sva;
6232	uint32_t l2attr;
6233
6234	KASSERT((size & PAGE_MASK) == 0,
6235	    ("%s: device mapping not page-sized", __func__));
6236
6237	sva = va;
6238	l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE);
6239	while (size != 0) {
6240		pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr);
6241		va += PAGE_SIZE;
6242		pa += PAGE_SIZE;
6243		size -= PAGE_SIZE;
6244	}
6245	tlb_flush_range(sva, va - sva);
6246}
6247
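/*
 *  Remove a page-aligned run of device mappings created by
 *  pmap_kenter_device() and flush the TLB for the range.
 */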
6248void
6249pmap_kremove_device(vm_offset_t va, vm_size_t size)
6250{
6251	vm_offset_t sva;
6252
6253	KASSERT((size & PAGE_MASK) == 0,
6254	    ("%s: device mapping not page-sized", __func__));
6255
6256	sva = va;
6257	while (size != 0) {
6258		pmap_kremove(va);
6259		va += PAGE_SIZE;
6260		size -= PAGE_SIZE;
6261	}
6262	tlb_flush_range(sva, va - sva);
6263}
6264
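/*
 *  Record the pmap's translation table base (TTB) in the given PCB so that
 *  it can later be loaded into TTBR on context switch.
 */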
6265void
6266pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
6267{
6268
6269	pcb->pcb_pagedir = pmap_ttb_get(pmap);
6270}
6271
6272
6273/*
6274 *  Clean L1 data cache range by physical address.
6275 *  The range must be within a single page.
6276 */
6277static void
6278pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr)
6279{
6280	pt2_entry_t *cmap2_pte2p;
6281	struct pcpu *pc;
6282
6283	KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE,
6284	    ("%s: not on single page", __func__));
6285
6286	sched_pin();
6287	pc = get_pcpu();
6288	cmap2_pte2p = pc->pc_cmap2_pte2p;
6289	mtx_lock(&pc->pc_cmap_lock);
6290	if (pte2_load(cmap2_pte2p) != 0)
6291		panic("%s: CMAP2 busy", __func__);
6292	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr));
6293	dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size);
6294	pte2_clear(cmap2_pte2p);
6295	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
6296	sched_unpin();
6297	mtx_unlock(&pc->pc_cmap_lock);
6298}
6299
6300/*
6301 *  Sync an instruction cache range which is not mapped yet.
6302 */
6303void
6304cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
6305{
6306	uint32_t len, offset;
6307	vm_page_t m;
6308
6309	/* Write back d-cache on given address range. */
6310	offset = pa & PAGE_MASK;
6311	for ( ; size != 0; size -= len, pa += len, offset = 0) {
6312		len = min(PAGE_SIZE - offset, size);
6313		m = PHYS_TO_VM_PAGE(pa);
6314		KASSERT(m != NULL, ("%s: vm_page_t is null for %#x",
6315		  __func__, pa));
6316		pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m));
6317	}
6318	/*
6319	 * The I-cache is VIPT. The only way to flush all virtual mappings
6320	 * of a given physical address is to invalidate the entire I-cache.
6321	 */
6322	icache_inv_all();
6323}
6324
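/*
 *  Synchronize the I-cache for the given range in the given pmap: write
 *  back the D-cache for the range, then invalidate the entire I-cache.
 */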
6325void
6326pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size)
6327{
6328
6329	/* Write back d-cache on given address range. */
6330	if (va >= VM_MIN_KERNEL_ADDRESS) {
6331		dcache_wb_pou(va, size);
6332	} else {
6333		uint32_t len, offset;
6334		vm_paddr_t pa;
6335		vm_page_t m;
6336
6337		offset = va & PAGE_MASK;
6338		for ( ; size != 0; size -= len, va += len, offset = 0) {
6339			pa = pmap_extract(pmap, va); /* offset is preserved */
6340			len = min(PAGE_SIZE - offset, size);
6341			m = PHYS_TO_VM_PAGE(pa);
6342			KASSERT(m != NULL, ("%s: vm_page_t is null for %#x",
6343				__func__, pa));
6344			pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m));
6345		}
6346	}
6347	/*
6348	 * The I-cache is VIPT. The only way to flush all virtual mappings
6349	 * of a given physical address is to invalidate the entire I-cache.
6350	 */
6351	icache_inv_all();
6352}
6353
6354/*
6355 *  The implementation of pmap_fault() uses the IN_RANGE2() macro, which
6356 *  depends on the fact that the given range size is a power of 2.
6357 */
6358CTASSERT(powerof2(NB_IN_PT1));
6359CTASSERT(powerof2(PT2MAP_SIZE));
6360
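/*
 *  Evaluates to true iff "addr" lies within the naturally aligned,
 *  power-of-2 sized region of "size" bytes that starts at "start".
 */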
6361#define IN_RANGE2(addr, start, size)	\
6362    ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1)))
6363
6364/*
6365 *  Handle access and R/W emulation faults.
6366 */
6367int
6368pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode)
6369{
6370	pt1_entry_t *pte1p, pte1;
6371	pt2_entry_t *pte2p, pte2;
6372
6373	if (pmap == NULL)
6374		pmap = kernel_pmap;
6375
6376	/*
6377	 * In the kernel, we should never get an abort with a FAR that lies in
6378	 * the pmap->pm_pt1 or PT2MAP address spaces. If it happens, stop here,
6379	 * print out a useful abort message, and even get to the debugger;
6380	 * otherwise it likely ends in a never-ending loop of aborts.
6381	 */
6382	if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) {
6383		/*
6384		 * All L1 tables should always be mapped and present.
6385		 * However, only the current one is checked here. For user mode,
6386		 * only a permission abort from a malicious user is not fatal,
6387		 * and likewise an alignment abort, as it may have higher priority.
6388		 */
6389		if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) {
6390			CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x",
6391			    __func__, pmap, pmap->pm_pt1, far);
6392			panic("%s: pm_pt1 abort", __func__);
6393		}
6394		return (KERN_INVALID_ADDRESS);
6395	}
6396	if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) {
6397		/*
6398		 * PT2MAP should always be mapped and present in the current
6399		 * L1 table. However, only existing L2 tables are mapped
6400		 * in PT2MAP. For user mode, only an L2 translation abort and
6401		 * a permission abort from a malicious user are not fatal,
6402		 * and likewise an alignment abort, as it may have higher priority.
6403		 */
6404		if (!usermode || (idx != FAULT_ALIGN &&
6405		    idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) {
6406			CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x",
6407			    __func__, pmap, PT2MAP, far);
6408			panic("%s: PT2MAP abort", __func__);
6409		}
6410		return (KERN_INVALID_ADDRESS);
6411	}
6412
6413	/*
6414	 * A pmap lock is used below for handling of access and R/W emulation
6415	 * aborts. They were previously handled by atomic operations, so some
6416	 * analysis of the new situation is needed to answer the following
6417	 * question: is it safe to use the lock even for these aborts?
6418	 *
6419	 * In general, two cases may happen:
6420	 *
6421	 * (1) Aborts while the pmap lock is already held - this should not
6422	 * happen as the pmap lock is not recursive. However, under the pmap
6423	 * lock only internal kernel data should be accessed and such data
6424	 * should be mapped with the A bit set and the NM bit cleared. If a
6425	 * double abort happens, then the mapping of the data which caused it
6426	 * must be fixed. Further, all new mappings are always made with the
6427	 * A bit set and the bit can be cleared only on managed mappings.
6428	 *
6429	 * (2) Aborts while one or more other locks are held - this can already
6430	 * happen. However, it makes no difference here whether it is an access
6431	 * or R/W emulation abort, or some other abort.
6432	 */
6433
6434	PMAP_LOCK(pmap);
6435#ifdef INVARIANTS
6436	pte1 = pte1_load(pmap_pte1(pmap, far));
6437	if (pte1_is_link(pte1)) {
6438		/*
6439		 * Check in advance that the associated L2 page table is mapped
6440		 * into PT2MAP space. Note that a faulty access to an unmapped
6441		 * L2 page table is caught by the more general check above,
6442		 * where "far" is checked not to lie in PT2MAP space. Note also
6443		 * that the L1 page table and PT2TAB always exist and are mapped.
6444		 */
6445		pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far));
6446		if (!pte2_is_valid(pte2))
6447			panic("%s: missing L2 page table (%p, %#x)",
6448			    __func__, pmap, far);
6449	}
6450#endif
6451#ifdef SMP
6452	/*
6453	 * Special treatment is needed due to the break-before-make approach
6454	 * used when pte1 is updated for a userland mapping during section
6455	 * promotion or demotion. If not caught here, pmap_enter() can find a
6456	 * section mapping on the faulting address. That is not allowed.
6457	 */
6458	if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) {
6459		PMAP_UNLOCK(pmap);
6460		return (KERN_SUCCESS);
6461	}
6462#endif
6463	/*
6464	 * Access bits for page and section. Note that the entry
6465	 * is not in TLB yet, so TLB flush is not necessary.
6466	 *
6467	 * QQQ: This is hardware emulation, we do not call userret()
6468	 *      for aborts from user mode.
6469	 */
6470	if (idx == FAULT_ACCESS_L2) {
6471		pte1 = pte1_load(pmap_pte1(pmap, far));
6472		if (pte1_is_link(pte1)) {
6473			/* L2 page table should exist and be mapped. */
6474			pte2p = pt2map_entry(far);
6475			pte2 = pte2_load(pte2p);
6476			if (pte2_is_valid(pte2)) {
6477				pte2_store(pte2p, pte2 | PTE2_A);
6478				PMAP_UNLOCK(pmap);
6479				return (KERN_SUCCESS);
6480			}
6481		} else {
6482			/*
6483			 * We got an L2 access fault but PTE1 is not a link.
6484			 * Probably some race happened; do nothing.
6485			 */
6486			CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L2 - pmap %#x far %#x",
6487			    __func__, pmap, far);
6488			PMAP_UNLOCK(pmap);
6489			return (KERN_SUCCESS);
6490		}
6491	}
6492	if (idx == FAULT_ACCESS_L1) {
6493		pte1p = pmap_pte1(pmap, far);
6494		pte1 = pte1_load(pte1p);
6495		if (pte1_is_section(pte1)) {
6496			pte1_store(pte1p, pte1 | PTE1_A);
6497			PMAP_UNLOCK(pmap);
6498			return (KERN_SUCCESS);
6499		} else {
6500			/*
6501			 * We got an L1 access fault but PTE1 is not a section
6502			 * mapping. Probably some race happened; do nothing.
6503			 */
6504			CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L1 - pmap %#x far %#x",
6505			    __func__, pmap, far);
6506			PMAP_UNLOCK(pmap);
6507			return (KERN_SUCCESS);
6508		}
6509	}
6510
6511	/*
6512	 * Handle modify bits for page and section. Note that the modify
6513	 * bit is emulated by software. So PTEx_RO is the software read-only
6514	 * bit and the PTEx_NM flag is the real hardware read-only bit.
6515	 *
6516	 * QQQ: This is hardware emulation, we do not call userret()
6517	 *      for aborts from user mode.
6518	 */
6519	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) {
6520		pte1 = pte1_load(pmap_pte1(pmap, far));
6521		if (pte1_is_link(pte1)) {
6522			/* L2 page table should exist and be mapped. */
6523			pte2p = pt2map_entry(far);
6524			pte2 = pte2_load(pte2p);
6525			if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) &&
6526			    (pte2 & PTE2_NM)) {
6527				pte2_store(pte2p, pte2 & ~PTE2_NM);
6528				tlb_flush(trunc_page(far));
6529				PMAP_UNLOCK(pmap);
6530				return (KERN_SUCCESS);
6531			}
6532		} else {
6533			/*
6534			 * We got L2 permission fault but PTE1 is not a link.
6535			 * We got an L2 permission fault but PTE1 is not a link.
6536			 * Probably some race happened; do nothing.
6537			CTR3(KTR_PMAP, "%s: FAULT_PERM_L2 - pmap %#x far %#x",
6538			    __func__, pmap, far);
6539			PMAP_UNLOCK(pmap);
6540			return (KERN_SUCCESS);
6541		}
6542	}
6543	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) {
6544		pte1p = pmap_pte1(pmap, far);
6545		pte1 = pte1_load(pte1p);
6546		if (pte1_is_section(pte1)) {
6547			if (!(pte1 & PTE1_RO) && (pte1 & PTE1_NM)) {
6548				pte1_store(pte1p, pte1 & ~PTE1_NM);
6549				tlb_flush(pte1_trunc(far));
6550				PMAP_UNLOCK(pmap);
6551				return (KERN_SUCCESS);
6552			}
6553		} else {
6554			/*
6555			 * We got an L1 permission fault but PTE1 is not a section
6556			 * mapping. Probably some race happened; do nothing.
6557			 */
6558			CTR3(KTR_PMAP, "%s: FAULT_PERM_L1 - pmap %#x far %#x",
6559			    __func__, pmap, far);
6560			PMAP_UNLOCK(pmap);
6561			return (KERN_SUCCESS);
6562		}
6563	}
6564
6565	/*
6566	 * QQQ: The previous code, mainly the fast handling of access and
6567	 *      modify bit aborts, could be moved to ASM. Now we are
6568	 *      starting to deal with the non-fast aborts.
6569	 */
6570	PMAP_UNLOCK(pmap);
6571	return (KERN_FAILURE);
6572}
6573
6574#if defined(PMAP_DEBUG)
6575/*
6576 *  Note: this reuses the KVA (CMAP2) used by the pmap_zero_page() function.
6577 */
6578static void
6579pmap_zero_page_check(vm_page_t m)
6580{
6581	pt2_entry_t *cmap2_pte2p;
6582	uint32_t *p, *end;
6583	struct pcpu *pc;
6584
6585	sched_pin();
6586	pc = get_pcpu();
6587	cmap2_pte2p = pc->pc_cmap2_pte2p;
6588	mtx_lock(&pc->pc_cmap_lock);
6589	if (pte2_load(cmap2_pte2p) != 0)
6590		panic("%s: CMAP2 busy", __func__);
6591	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
6592	    vm_page_pte2_attr(m)));
6593	end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE);
6594	for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++)
6595		if (*p != 0)
6596			panic("%s: page %p not zero, va: %p", __func__, m,
6597			    pc->pc_cmap2_addr);
6598	pte2_clear(cmap2_pte2p);
6599	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
6600	sched_unpin();
6601	mtx_unlock(&pc->pc_cmap_lock);
6602}
6603
6604int
6605pmap_pid_dump(int pid)
6606{
6607	pmap_t pmap;
6608	struct proc *p;
6609	int npte2 = 0;
6610	int i, j, index;
6611
6612	sx_slock(&allproc_lock);
6613	FOREACH_PROC_IN_SYSTEM(p) {
6614		if (p->p_pid != pid || p->p_vmspace == NULL)
6615			continue;
6616		index = 0;
6617		pmap = vmspace_pmap(p->p_vmspace);
6618		for (i = 0; i < NPTE1_IN_PT1; i++) {
6619			pt1_entry_t pte1;
6620			pt2_entry_t *pte2p, pte2;
6621			vm_offset_t base, va;
6622			vm_paddr_t pa;
6623			vm_page_t m;
6624
6625			base = i << PTE1_SHIFT;
6626			pte1 = pte1_load(&pmap->pm_pt1[i]);
6627
6628			if (pte1_is_section(pte1)) {
6629				/*
6630				 * QQQ: Do something here!
6631				 */
6632			} else if (pte1_is_link(pte1)) {
6633				for (j = 0; j < NPTE2_IN_PT2; j++) {
6634					va = base + (j << PAGE_SHIFT);
6635					if (va >= VM_MIN_KERNEL_ADDRESS) {
6636						if (index) {
6637							index = 0;
6638							printf("\n");
6639						}
6640						sx_sunlock(&allproc_lock);
6641						return (npte2);
6642					}
6643					pte2p = pmap_pte2(pmap, va);
6644					pte2 = pte2_load(pte2p);
6645					pmap_pte2_release(pte2p);
6646					if (!pte2_is_valid(pte2))
6647						continue;
6648
6649					pa = pte2_pa(pte2);
6650					m = PHYS_TO_VM_PAGE(pa);
6651					printf("va: 0x%x, pa: 0x%x, h: %d, w:"
6652					    " %d, f: 0x%x", va, pa,
6653					    m->hold_count, m->wire_count,
6654					    m->flags);
6655					npte2++;
6656					index++;
6657					if (index >= 2) {
6658						index = 0;
6659						printf("\n");
6660					} else {
6661						printf(" ");
6662					}
6663				}
6664			}
6665		}
6666	}
6667	sx_sunlock(&allproc_lock);
6668	return (npte2);
6669}
6670
6671#endif
6672
6673#ifdef DDB
6674static pt2_entry_t *
6675pmap_pte2_ddb(pmap_t pmap, vm_offset_t va)
6676{
6677	pt1_entry_t pte1;
6678	vm_paddr_t pt2pg_pa;
6679
6680	pte1 = pte1_load(pmap_pte1(pmap, va));
6681	if (!pte1_is_link(pte1))
6682		return (NULL);
6683
6684	if (pmap_is_current(pmap))
6685		return (pt2map_entry(va));
6686
6687	/* Note that L2 page table size is not equal to PAGE_SIZE. */
6688	pt2pg_pa = trunc_page(pte1_link_pa(pte1));
6689	if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) {
6690		pte2_store(PMAP3, PTE2_KPT(pt2pg_pa));
6691#ifdef SMP
6692		PMAP3cpu = PCPU_GET(cpuid);
6693#endif
6694		tlb_flush_local((vm_offset_t)PADDR3);
6695	}
6696#ifdef SMP
6697	else if (PMAP3cpu != PCPU_GET(cpuid)) {
6698		PMAP3cpu = PCPU_GET(cpuid);
6699		tlb_flush_local((vm_offset_t)PADDR3);
6700	}
6701#endif
6702	return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
6703}
6704
6705static void
6706dump_pmap(pmap_t pmap)
6707{
6708
6709	printf("pmap %p\n", pmap);
6710	printf("  pm_pt1: %p\n", pmap->pm_pt1);
6711	printf("  pm_pt2tab: %p\n", pmap->pm_pt2tab);
6712	printf("  pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]);
6713}
6714
6715DB_SHOW_COMMAND(pmaps, pmap_list_pmaps)
6716{
6717
6718	pmap_t pmap;
6719	LIST_FOREACH(pmap, &allpmaps, pm_list) {
6720		dump_pmap(pmap);
6721	}
6722}
6723
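/*
 *  Extract the TEX[0]:C:B memory-type class bits from a small-page PTE2;
 *  used only by the DDB dump routines below.
 */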
6724static int
6725pte2_class(pt2_entry_t pte2)
6726{
6727	int cls;
6728
6729	cls = (pte2 >> 2) & 0x03;
6730	cls |= (pte2 >> 4) & 0x04;
6731	return (cls);
6732}
6733
6734static void
6735dump_section(pmap_t pmap, uint32_t pte1_idx)
6736{
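	/* Dumping of 1 MB section mappings is not implemented. */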
6737}
6738
6739static void
6740dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok)
6741{
6742	uint32_t i;
6743	vm_offset_t va;
6744	pt2_entry_t *pte2p, pte2;
6745	vm_page_t m;
6746
6747	va = pte1_idx << PTE1_SHIFT;
6748	pte2p = pmap_pte2_ddb(pmap, va);
6749	for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) {
6750		pte2 = pte2_load(pte2p);
6751		if (pte2 == 0)
6752			continue;
6753		if (!pte2_is_valid(pte2)) {
6754			printf(" 0x%08X: 0x%08X", va, pte2);
6755			if (!invalid_ok)
6756				printf(" - not valid !!!");
6757			printf("\n");
6758			continue;
6759		}
6760		m = PHYS_TO_VM_PAGE(pte2_pa(pte2));
6761		printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va, pte2,
6762		    pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m);
6763		if (m != NULL) {
6764			printf(" v:%d h:%d w:%d f:0x%04X\n", m->valid,
6765			    m->hold_count, m->wire_count, m->flags);
6766		} else {
6767			printf("\n");
6768		}
6769	}
6770}
6771
6772static __inline boolean_t
6773is_pv_chunk_space(vm_offset_t va)
6774{
6775
6776	if ((((vm_offset_t)pv_chunkbase) <= va) &&
6777	    (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks)))
6778		return (TRUE);
6779	return (FALSE);
6780}
6781
6782DB_SHOW_COMMAND(pmap, pmap_pmap_print)
6783{
6784	/* XXX convert args. */
6785	pmap_t pmap = (pmap_t)addr;
6786	pt1_entry_t pte1;
6787	pt2_entry_t pte2;
6788	vm_offset_t va, eva;
6789	vm_page_t m;
6790	uint32_t i;
6791	boolean_t invalid_ok, dump_link_ok, dump_pv_chunk;
6792
6793	if (have_addr) {
6794		pmap_t pm;
6795
6796		LIST_FOREACH(pm, &allpmaps, pm_list)
6797			if (pm == pmap) break;
6798		if (pm == NULL) {
6799			printf("given pmap %p is not in allpmaps list\n", pmap);
6800			return;
6801		}
6802	} else
6803		pmap = PCPU_GET(curpmap);
6804
6805	eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF;
6806	dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */
6807
6808	printf("pmap: 0x%08X\n", (uint32_t)pmap);
6809	printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP);
6810	printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab);
6811
6812	for (i = 0; i < NPTE1_IN_PT1; i++) {
6813		pte1 = pte1_load(&pmap->pm_pt1[i]);
6814		if (pte1 == 0)
6815			continue;
6816		va = i << PTE1_SHIFT;
6817		if (va >= eva)
6818			break;
6819
6820		if (pte1_is_section(pte1)) {
6821			printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1,
6822			    !!(pte1 & PTE1_S), !(pte1 & PTE1_NG));
6823			dump_section(pmap, i);
6824		} else if (pte1_is_link(pte1)) {
6825			dump_link_ok = TRUE;
6826			invalid_ok = FALSE;
6827			pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
6828			m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
6829			printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p",
6830			    va, pte1, pte2, m);
6831			if (is_pv_chunk_space(va)) {
6832				printf(" - pv_chunk space");
6833				if (dump_pv_chunk)
6834					invalid_ok = TRUE;
6835				else
6836					dump_link_ok = FALSE;
6837			}
6838			else if (m != NULL)
6839				printf(" w:%d w2:%u", m->wire_count,
6840				    pt2_wirecount_get(m, pte1_index(va)));
6841			if (pte2 == 0)
6842				printf(" !!! pt2tab entry is ZERO");
6843			else if (pte2_pa(pte1) != pte2_pa(pte2))
6844				printf(" !!! pt2tab entry is DIFFERENT - m: %p",
6845				    PHYS_TO_VM_PAGE(pte2_pa(pte2)));
6846			printf("\n");
6847			if (dump_link_ok)
6848				dump_link(pmap, i, invalid_ok);
6849		} else
6850			printf("0x%08X: Invalid entry 0x%08X\n", va, pte1);
6851	}
6852}
6853
6854static void
6855dump_pt2tab(pmap_t pmap)
6856{
6857	uint32_t i;
6858	pt2_entry_t pte2;
6859	vm_offset_t va;
6860	vm_paddr_t pa;
6861	vm_page_t m;
6862
6863	printf("PT2TAB:\n");
6864	for (i = 0; i < PT2TAB_ENTRIES; i++) {
6865		pte2 = pte2_load(&pmap->pm_pt2tab[i]);
6866		if (!pte2_is_valid(pte2))
6867			continue;
6868		va = i << PT2TAB_SHIFT;
6869		pa = pte2_pa(pte2);
6870		m = PHYS_TO_VM_PAGE(pa);
6871		printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2,
6872		    pte2_class(pte2), !!(pte2 & PTE2_S), m);
6873		if (m != NULL)
6874			printf(" , h: %d, w: %d, f: 0x%04X pidx: %lld",
6875			    m->hold_count, m->wire_count, m->flags, m->pindex);
6876		printf("\n");
6877	}
6878}
6879
6880DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print)
6881{
6882	/* XXX convert args. */
6883	pmap_t pmap = (pmap_t)addr;
6884	pt1_entry_t pte1;
6885	pt2_entry_t pte2;
6886	vm_offset_t va;
6887	uint32_t i, start;
6888
6889	if (have_addr) {
6890		printf("supported only on current pmap\n");
6891		return;
6892	}
6893
6894	pmap = PCPU_GET(curpmap);
6895	printf("curpmap: 0x%08X\n", (uint32_t)pmap);
6896	printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP);
6897	printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab);
6898
6899	start = pte1_index((vm_offset_t)PT2MAP);
6900	for (i = start; i < (start + NPT2_IN_PT2TAB); i++) {
6901		pte1 = pte1_load(&pmap->pm_pt1[i]);
6902		if (pte1 == 0)
6903			continue;
6904		va = i << PTE1_SHIFT;
6905		if (pte1_is_section(pte1)) {
6906			printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1,
6907			    !!(pte1 & PTE1_S));
6908			dump_section(pmap, i);
6909		} else if (pte1_is_link(pte1)) {
6910			pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
6911			printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va,
6912			    pte1, pte2);
6913			if (pte2 == 0)
6914				printf("  !!! pt2tab entry is ZERO\n");
6915		} else
6916			printf("0x%08X: Invalid entry 0x%08X\n", va, pte1);
6917	}
6918	dump_pt2tab(pmap);
6919}
6920#endif
6921