/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
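
/*
 * Illustration only: with a hypothetical MSR_KERNEL value of 0x00021000
 * (MSR_CE | MSR_ME, too large for a 16-bit li immediate),
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to
 *	lis	r10,0x0002
 *	ori	r10,r10,0x1000
 * whereas a value below 0x10000 is loaded with a single li.
 */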

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
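/*
 * The exception prologs reach here via a "bl transfer_to_handler"
 * (or transfer_to_handler_full) that is immediately followed by two
 * words: the handler address and the address to return to when the
 * handler is done, e.g. (handler name purely illustrative)
 *	bl	transfer_to_handler
 *	.long	some_exception_handler
 *	.long	ret_from_except
 * transfer_to_handler_cont picks these up through the link register
 * (mflr r9); the nonrecoverable-exception path later in this file
 * uses the same two-word convention.
 */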
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

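/*
 * For reference, a rough sketch of the user-side convention this code
 * implements (illustrative only): the syscall number goes in r0,
 * arguments in r3-r8, and "sc" traps to the 0xc00 vector:
 *	li	r0,__NR_getpid
 *	sc
 *	bns+	1f		# CR0.SO clear: r3 is the result
 *				# CR0.SO set:   r3 is a positive errno
 * The SO bit is set on the error paths below ("Set SO bit in CR").
 */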
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
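/*
 * The low bit of the TRAP word in the exception frame records whether
 * the frame holds the non-volatile registers: typically it is left set
 * while only the volatile registers have been saved, and cleared once
 * the full set is present.  Note that rlwinm rD,rS,0,0,30 keeps IBM
 * bits 0-30, i.e. it clears bit 31, the least-significant bit.
 */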
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
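/*
 * A sketch of the C-side contract assumed here: do_page_fault() is
 * called with r3 = pt_regs, r4 = faulting address (DAR) and r5 = the
 * error code set up by the exception prolog; it is expected to return
 * 0 when the fault has been handled, or a signal number for a bad
 * kernel fault, which we then pass as the third argument to
 * bad_page_fault(regs, address, sig).
 */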
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
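/*
 * Conceptually (a simplified sketch, not the exact C prototype), the
 * scheduler side does something like
 *	last = _switch(&prev->thread, &next->thread);
 * The old "current" is handed back in r3 (see "mr r3,r2" below) so
 * that switch_to()/__switch_to() can return it as "last".
 */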
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
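/*
 * Each exception level returns through its own save/restore SPR pair
 * and rfi variant: critical uses CSRR0/CSRR1 and RFCI, while Book-E
 * debug and machine check use DSRR0/DSRR1/RFDI and MCSRR0/MCSRR1/RFMCI
 * respectively.  RET_FROM_EXC_LEVEL below is instantiated once for
 * each of these.
 */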
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR; we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
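/*
 * global_dbcr0 (in .bss below) is two words: word 0 holds the saved
 * global DBCR0 value and word 1 is a use count, incremented here and
 * decremented when transfer_to_handler reloads DBCR0 for a traced
 * task on exception entry.
 */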
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
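/*
 * Roughly:
 *	if (exc_exit_restart <= r12 && r12 < exc_exit_restart_end)
 *		count it in ee_restarts and restart at exc_exit_restart;
 *	else
 *		make sure the non-volatile registers are saved and call
 *		nonrecoverable_exception(), which should not return.
 */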
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous