/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: stable/10/sys/sparc64/sparc64/exception.S 293853 2016-01-13 21:38:52Z marius $");

#include "opt_compat.h"
#include "opt_ddb.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/frame.h>
#include <machine/fsr.h>
#include <machine/intr_machdep.h>
#include <machine/ktr.h>
#include <machine/pcb.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tsb.h>
#include <machine/tstate.h>
#include <machine/utrap.h>
#include <machine/wstate.h>

#include "assym.s"

#define	TSB_ASI			0x0
#define	TSB_KERNEL		0x0
#define	TSB_KERNEL_MASK		0x0
#define	TSB_KERNEL_PHYS		0x0
#define	TSB_KERNEL_PHYS_END	0x0
#define	TSB_QUAD_LDD		0x0

	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore

/*
 * Atomically set a bit in a TTE.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
	add	r1, TTE_DATA, r1 ; \
	LD(x, a) [r1] asi, r2 ; \
9:	or	r2, bit, r3 ; \
	CAS(x, a) [r1] asi, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
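
/*
 * E.g. TTE_SET_REF(%g4, %g2, %g3, a, ASI_N) ors TD_REF into the TTE data
 * word at %g4 + TTE_DATA, retrying the casxa until it succeeds, and leaves
 * the updated data word in %g2.
 */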

/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7

#define	ERRATUM50(reg)	mov reg, reg

#define	KSTACK_SLOP	1024

/*
 * Sanity check the kernel stack and bail out if it's wrong.
 * XXX: doesn't handle being on the panic stack.
 */
#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

	.globl	tl_text_begin
tl_text_begin:
	nop

ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
1:	cmp	%g1, 2
	be,a	2f
	 nop

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	rdpr	%tl, %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
9:
#endif

	sub	%g1, 1, %g1
	wrpr	%g1, 0, %tl
	ba,a	%xcc, 1b
	 nop

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	sub	ASP_REG, SPOFF + CCFSZ, %sp
	clr	%fp

	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * MMU fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from alignment faults.
 */
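
/*
 * The MMU case (RSF_OFF_MMU below) works out the same way:
 *
 *	0x7f ^ 0x0f == 0x70
 *	0x0f == (0x80 - 0x70) - 1
 *
 * i.e. oring 0x7f into the trapped pc and xoring with RSF_XOR(RSF_OFF_MMU)
 * resumes at instruction offset 0x70 in the vector.
 */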

/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector.  Must be called on
 * alternate globals.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	 cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	 or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the SFSR MMU register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	ba	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	ba	%xcc, rsf_fatal ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to usermode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	ba,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16

ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	sir
END(rsf_fatal)

	.data
	_ALIGN_DATA
	.globl	intrnames, sintrnames
intrnames:
	.space	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
sintrnames:
	.quad	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)

	.globl	intrcnt, sintrcnt
intrcnt:
	.space	(IV_MAX + PIL_MAX) * 8
sintrcnt:
	.quad	(IV_MAX + PIL_MAX) * 8
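
/*
 * The sintrnames and sintrcnt quads export the sizes of the tables above
 * so that userland consumers of the interrupt statistics (e.g. vmstat(8))
 * can read them via kvm.
 */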

	.text

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is the instructions (8 or 32) for the actual trap vector appear
 * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
 * but if not, supporting code can be placed just after the definition of the
 * macro.  The macros are then instantiated in a different section (.trap),
 * which is set up to be placed by the linker at the beginning of .text, and
 * the code around the macros is moved to the end of the trap table.  In this
 * way the code that must be sequential in memory can be split up, and located
 * near its supporting code so that it is easier to follow.
 */

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, and do the save.  If this traps we
	 * attempt to spill a window to the user stack.  If this fails, we
	 * spill the window to the pcb and continue.  Spilling to the pcb
	 * must not fail.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	.macro	tl0_setup	type
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_utrap
	 mov	\type, %o0
	.endm

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
	.endm

	.macro	tl1_setup	type
	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl0_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	ba	%xcc, tl0_intr
	 mov	\level, %o0
	.align	32
	.endm

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 0x10001

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	intr_vector
	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
	andcc	%g1, IRSR_BUSY, %g0
	bnz,a,pt %xcc, intr_vector
	 nop
	ba,a,pt	%xcc, intr_vector_stray
	 nop
	.align	32
	.endm

	.macro	tl0_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3
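	/*
	 * That is, %g3 = 3 * %g2 + PAGE_SHIFT; each successive page size is
	 * eight times larger, so the shift grows by 3 bits per step.
	 */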

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the TTE tags match.
	 */
	brgez,pn %g7, 3f
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)

ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	sethi	%hi(KERNBASE), %g2
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	flush	%g2

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	tl0_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_miss_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_miss_trap
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)

	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	 cmp	%g3, %g6
	bne,pn	%xcc, 4f
	 nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	 or	%g2, TD_W, %g2

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	 nop
END(tl0_dmmu_prot_1)

ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Load the SFAR, SFSR and TAR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the MMU registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)

	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	 mov	T_SYSCALL, %o0
	.align	32
	.endm

	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	 nop
	.align	32
	.endm

ENTRY(tl0_fp_restore)
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)

	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)

	.macro	tl1_fp_disabled
	ba,a	%xcc, tl1_fp_disabled_1
	 nop
	.align	32
	.endm

ENTRY(tl1_fp_disabled_1)
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f
	 nop

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)

	.macro	tl1_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_data_excptn_trap)
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)

	.macro	tl1_align
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_align_trap)
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)

ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask
	tl1_split
	set	\mask, %o1
	ba	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_1
tl1_immu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_1
tl1_immu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_immu_miss_patch_quad_ldd_1
tl1_immu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_2
tl1_immu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_2
tl1_immu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_immu_miss_patch_asi_1
tl1_immu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)

ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)

	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_1
tl1_dmmu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_1
tl1_dmmu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_miss_patch_quad_ldd_1
tl1_dmmu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_2
tl1_dmmu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_2
tl1_dmmu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_dmmu_miss_patch_asi_1
tl1_dmmu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)

ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)

ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the TTE bits.  The virtual address bits that
	 * correspond to the TTE valid and page size bits are left set, so
	 * they don't have to be included in the TTE bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
	 * and we get a miss on the directly accessed kernel TSB we must not
	 * set TD_CV in order to access it uniformly bypassing the D$.
	 */
	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
	and	%g5, %g4, %g4
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
tl1_dmmu_miss_direct_patch_tsb_phys_1:
	sethi	%uhi(TSB_KERNEL_PHYS), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bl,pt	%xcc, 1f
	 or	%g5, TD_CP | TD_W, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bg,a,pt	%xcc, 1f
	 nop
	ba,pt	%xcc, 2f
	 nop
1:	or	%g5, TD_CV, %g5

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)

	.macro	tl1_dmmu_prot
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl1_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_1
tl1_dmmu_prot_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_1
tl1_dmmu_prot_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_prot_patch_quad_ldd_1
tl1_dmmu_prot_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_2
tl1_dmmu_prot_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_2
tl1_dmmu_prot_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	.globl	tl1_dmmu_prot_patch_asi_1
tl1_dmmu_prot_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_W(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)

ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_7_n
	btst	1, %sp
	bnz,a,pn %xcc, tl1_spill_0_n
	 nop
	srl	%sp, 0, %sp
	SPILL(stw, %sp, 4, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_7_n
	btst	1, %sp
	bnz,a,pt %xcc, tl1_fill_0_n
	 nop
	srl	%sp, 0, %sp
	FILL(lduw, %sp, 4, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)

	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm

	.sect	.trap
	.globl	tl_trap_begin
tl_trap_begin:
	nop

	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OVERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
	tl0_spill_bad	14				! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
	tl0_fill_bad	14				! 0xc8-0xff
tl0_soft:
	tl0_gen		T_SYSCALL			! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_gen		T_SYSCALL			! 0x108
	tl0_gen		T_SYSCALL			! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	32				! 0x120-0x13f
	tl0_gen		T_SYSCALL			! 0x140
	tl0_syscall					! 0x141
	tl0_gen		T_SYSCALL			! 0x142
	tl0_gen		T_SYSCALL			! 0x143
	tl0_reserved	188				! 0x144-0x1ff

tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_fp_disabled					! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OVERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n					! 0x288
tl1_spill_3_n:
	tl1_spill_3_n					! 0x28c
	tl1_spill_bad	3				! 0x290-0x29b
tl1_spill_7_n:
	tl1_spill_7_n					! 0x29c
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
	tl1_spill_bad	5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n					! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n					! 0x2cc
	tl1_fill_bad	3				! 0x2d0-0x2db
tl1_fill_7_n:
	tl1_fill_7_n					! 0x2dc
	tl1_fill_bad	8				! 0x2e0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff

	.globl	tl_trap_end
tl_trap_end:
	nop
2161
/*
 * User trap entry point
 *
 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_long sfsr)
 *
 * This handles redirecting a trap back to usermode as a user trap.  The user
 * program must have first registered a trap handler with the kernel using
 * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
 * for it to return to the trapping code directly; it will not return through
 * the kernel.  The trap type is passed in %o0, and all out registers must be
 * passed through to tl0_trap or to usermode untouched.  Note that the
 * parameters passed in the out registers may be used by the user trap
 * handler; do not change the registers they are passed in or you will break
 * the ABI.
 *
 * If the trap type allows user traps, set up state to execute the user trap
 * handler and bounce back to usermode; otherwise branch to tl0_trap.
 */
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap
	 nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	brz,pt	%l0, tl0_trap
	 sllx	%o0, PTR_SHIFT, %l1
	ldx	[%l0 + %l1], %l0
	brz,a,pt %l0, tl0_trap
	 nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead.  Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code.  Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	 mov	T_SPILL, %o0

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi.  The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
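	/*
	 * Note: %fsr can only be moved to and from memory, and accessing
	 * it requires FPRS_FEF to be set, which is why the float-enable
	 * bit is toggled around the store and load below.
	 */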
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Set up %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc

	/*
	 * Set up %wstate for return; clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Set up %tstate for return; change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Set up %sp.  Userland processes will crash if this is not set up.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)

/*
 * (Real) User trap entry point
 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on alternate globals and interrupts are disabled
 *
 * We switch to the kernel stack, build a trapframe, switch to normal
 * globals, enable interrupts and call trap.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know
 * that it has been pre-set in alternate globals, so we read it from there
 * and set up the normal %g7 *before* enabling interrupts.  This avoids any
 * possibility of cpu migration and using the wrong pcpup.
 */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

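	/*
	 * Record the user window state in the "other" field of %wstate and
	 * switch the normal field to the kernel window state, then move the
	 * remaining user windows into the "other" set so they are spilled
	 * via the user (other) spill handlers rather than the kernel ones.
	 */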
1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

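	/*
	 * Switch to the kernel stack.  The pcb sits at the top of the
	 * kernel stack, so the trapframe is allocated just below it.
	 */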
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

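	/*
	 * Call the C handler whose address the trap table macros left in
	 * %o2 (typically trap(), or syscall() for system call vectors),
	 * passing the trapframe pointer in %o0 (delay slot).  %o7 is faked
	 * so that the handler returns to tl0_ret.
	 */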
	set	tl0_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl0_trap)

/*
 * void tl0_intr(u_int level, u_int mask)
 */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

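	/*
	 * Raise %pil to the level of this interrupt and clear the
	 * corresponding bit(s) in %softint so the interrupt does not
	 * immediately re-trigger.
	 */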
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	%o0, %l3
	mov	T_INTERRUPT, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

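	/*
	 * Look up the handler for this interrupt level in intr_handlers[]
	 * and call it with the trapframe pointer in %o0 (delay slot).
	 */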
	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l3 contains the PIL; increment intrcnt[pil_countp[PIL]]. */
	SET(intrcnt, %l1, %l2)
	prefetcha [%l2] ASI_N, 1
	SET(pil_countp, %l1, %l0)
	sllx	%l3, 1, %l1
	lduh	[%l0 + %l1], %l0
	sllx	%l0, 3, %l0
	add	%l0, %l2, %l0
	ldx	[%l0], %l1
	inc	%l1
	stx	%l1, [%l0]

	lduw	[PCPU(CNT) + V_INTR], %l0
	inc	%l0
	stw	%l0, [PCPU(CNT) + V_INTR]

	ba,a	%xcc, tl0_ret
	 nop
END(tl0_intr)

/*
 * Initiate return to usermode.
 *
 * Called with a trapframe on the stack.  The window that was set up in
 * tl0_trap may have been used by "fast" trap handlers that pretend to be
 * leaf functions, so all ins and locals may have been clobbered since
 * then.
 *
 * This code is rather long and complicated.
 */
ENTRY(tl0_ret)
	/*
	 * Check for pending asts atomically with returning.  We must raise
	 * the PIL before checking, and if no asts are found the PIL must
	 * remain raised until the retry is executed, or we risk missing asts
	 * caused by interrupts occurring after the test.  If the PIL is
	 * lowered, as it is when we call ast, the check must be re-executed.
	 */
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %l0
	lduw	[%l0 + TD_FLAGS], %l1
	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
	and	%l1, %l2, %l1
	brz,a,pt %l1, 1f
	 nop

	/*
	 * We have an AST.  Re-enable interrupts and handle it, then restart
	 * the return sequence.
	 */
	wrpr	%g0, 0, %pil
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Check for windows that were spilled to the pcb and need to be
	 * copied out.  This must be the last thing that is done before the
	 * return to usermode.  If there are still user windows in the cpu
	 * and we call a nested function after this, which causes them to be
	 * spilled to the pcb, they will not be copied out and the stack will
	 * be inconsistent.
	 */
1:	ldx	[PCB_REG + PCB_NSAVED], %l1
	brz,a,pt %l1, 2f
	 nop
	wrpr	%g0, 0, %pil
	mov	T_SPILL, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Restore the out and most global registers from the trapframe.
	 * The ins will become the outs when we restore below.
	 */
2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/*
	 * Load everything we need to restore below before disabling
	 * interrupts.
	 */
	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6

	/*
	 * Disable interrupts to restore the special globals.  They are not
	 * saved and restored for all kernel traps, so an interrupt at the
	 * wrong time would clobber them.
	 */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/*
	 * Switch to alternate globals.  This frees up some registers we
	 * can use after the restore changes our window.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Drop %pil to zero.  It must have been zero at the time of the
	 * trap, since we were in usermode, but it was raised above in
	 * order to check for asts atomically.  We have interrupts disabled
	 * so any interrupts will not be serviced until we complete the
	 * return to usermode.
	 */
	wrpr	%g0, 0, %pil

	/*
	 * Save %fprs in an alternate global so it can be restored after the
	 * restore instruction below.  If we restored it before the restore
	 * and the restore trapped, we might run for a while with floating
	 * point enabled in the kernel, which we want to avoid.
	 */
	mov	%l0, %g1

	/*
	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
	 * so we set it temporarily and then clear it.
	 */
	wr	%g0, FPRS_FEF, %fprs
	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
	wr	%l1, 0, %gsr
	wr	%g0, 0, %fprs

	/*
	 * Restore program counters.  This could be done after the restore
	 * but we're out of alternate globals to store them in...
	 */
	wrpr	%l2, 0, %tnpc
	wrpr	%l3, 0, %tpc

	/*
	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
	 * will be affected by the restore below and we need to make sure it
	 * points to the current window at that time, not the window that was
	 * active at the time of the trap.
	 */
	andn	%l4, TSTATE_CWP_MASK, %g2

	/*
	 * Restore %y.  Could also be below if we had more alternate globals.
	 */
	wr	%l5, 0, %y

	/*
	 * Set up %wstate for return.  We need to restore the user window
	 * state which we saved in wstate.other when we trapped.  We also
	 * need to set the transition bit so the restore will be handled
	 * specially if it traps; the xor feature of wrpr is used to do that.
	 */
	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
	wrpr	%g3, WSTATE_TRANSITION, %wstate

	/*
	 * Set up window management registers for return.  If not all user
	 * windows were spilled in the kernel, %otherwin will be non-zero,
	 * so we need to transfer it to %canrestore to correctly restore
	 * those windows.  Otherwise everything gets set to zero and the
	 * restore below will fill a window directly from the user stack.
	 */
	rdpr	%otherwin, %o0
	wrpr	%o0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%o0, 0, %cleanwin

	/*
	 * Now do the restore.  If this instruction causes a fill trap which
	 * fails to fill a window from the user stack, we will resume at
	 * tl0_ret_fill_end and call back into the kernel.
	 */
	restore
tl0_ret_fill:

	/*
	 * We made it.  We're back in the window that was active at the time
	 * of the trap, and ready to return to usermode.
	 */
	/*
	 * Restore %fprs.  This was saved in an alternate global above.
	 */
	wr	%g1, 0, %fprs

	/*
	 * Fix up %tstate so the saved %cwp points to the current window and
	 * restore it.
	 */
	rdpr	%cwp, %g4
	wrpr	%g2, %g4, %tstate

	/*
	 * Restore the user window state.  The transition bit was set above
	 * for special handling of the restore; this clears it.
	 */
	wrpr	%g3, 0, %wstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	/*
	 * Return to usermode.
	 */
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	rdpr	%pstate, %l1
	stx	%l1, [%l0 + KTR_PARM1]
	stx	%l6, [%l0 + KTR_PARM2]
	stx	%sp, [%l0 + KTR_PARM3]
9:
#endif

	/*
	 * The restore above caused a fill trap and the fill handler was
	 * unable to fill a window from the user stack.  The special fill
	 * handler recognized this and punted, sending us here.  We need
	 * to carefully undo any state that was restored before the restore
	 * was executed and call trap again.  Trap will copyin a window
	 * from the user stack, which will fault in the page we need, so the
	 * restore above will succeed when we try again.  If this fails
	 * the process has trashed its stack, so we kill it.
	 */

	/*
	 * Restore the kernel window state.  This was saved in %l6 above, and
	 * since the restore failed we're back in the same window.
	 */
	wrpr	%l6, 0, %wstate

	/*
	 * Restore the normal globals, which have predefined values in the
	 * kernel.  We clobbered them above restoring the user's globals,
	 * so this is very important.
	 * XXX PSTATE_ALT must already be set.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	mov	PCB_REG, %o0
	mov	PCPU_REG, %o1
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%o0, PCB_REG
	mov	%o1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/*
	 * Simulate a fill trap and then start the whole return sequence over
	 * again.  This is special because it only copies in 1 window, not 2
	 * as we would for a normal failed fill.  This may be the first time
	 * the process has been run, so there may not be 2 windows worth of
	 * stack to copyin.
	 */
	mov	T_FILL_RET, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop
END(tl0_ret)

/*
 * Kernel trap entry point
 *
 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * This is easy because the stack is already set up and the windows don't
 * need to be split.  We build a trapframe and call trap(), the same as
 * above, but the outs don't need to be saved.
 */
ENTRY(tl1_trap)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

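	/*
	 * We entered at TL 2; the trap state registers have been read out
	 * above, so drop back to TL 1 to free up the level for nested
	 * traps.
	 */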
	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	set	tl1_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl1_trap)

ENTRY(tl1_ret)
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

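	/*
	 * The normal %g6 and %g7 hold the pcb and pcpu pointers while in
	 * the kernel and are otherwise never modified, so they only need to
	 * be restored from the trapframe when returning into the PROM
	 * address range, where the firmware keeps its own values in them.
	 */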
	set	VM_MIN_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bl,a,pt	%xcc, 1f
	 nop
	set	VM_MAX_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bg,a,pt	%xcc, 1f
	 nop

	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

1:	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3

	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	restore

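	/*
	 * Raise back to TL 2 so the trap state registers can be written
	 * for the retry.
	 */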
	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_ret)

/*
 * void tl1_intr(u_int level, u_int mask)
 */
ENTRY(tl1_intr)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	mov	%o0, %l7
	mov	T_INTERRUPT | T_KERNEL, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	mov	PCB_REG, %l4
	mov	PCPU_REG, %l5
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	mov	%l4, PCB_REG
	mov	%l5, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	SET(intr_handlers, %l5, %l4)
	sllx	%l7, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	KASSERT(%l5, "tl1_intr: ih null")
	call	%l5
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l7 contains the PIL; increment intrcnt[pil_countp[PIL]]. */
	SET(intrcnt, %l5, %l4)
	prefetcha [%l4] ASI_N, 1
	SET(pil_countp, %l5, %l6)
	sllx	%l7, 1, %l5
	lduh	[%l5 + %l6], %l5
	sllx	%l5, 3, %l5
	add	%l5, %l4, %l4
	ldx	[%l4], %l5
	inc	%l5
	stx	%l5, [%l4]

	lduw	[PCPU(CNT) + V_INTR], %l4
	inc	%l4
	stw	%l4, [PCPU(CNT) + V_INTR]

	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	restore

	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_intr)

	.globl	tl_text_end
tl_text_end:
	nop

/*
 * Freshly forked processes come here when switched to for the first time.
 * The arguments to fork_exit() have been set up in the locals; we must move
 * them to the outs.
 */
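/*
 * The handler, its argument and the trapframe pointer are expected in
 * %l0-%l2 of this window; on this port they should be placed there by
 * cpu_fork() (and adjusted by cpu_set_fork_handler()).
 */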
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	 mov	%l2, %o2
	ba,a	%xcc, tl0_ret
	 nop
END(fork_trampoline)
