/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/trap.c 262042 2014-02-17 12:57:13Z avg $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

78184257Slulfstatic void	trap_fatal(struct trapframe *frame);
79185094Slulfstatic void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
80184257Slulf		    int user);
81184257Slulfstatic int	trap_pfault(struct trapframe *frame, int user);
82184257Slulfstatic int	fix_unaligned(struct thread *td, struct trapframe *frame);
83184257Slulfstatic int	ppc_instr_emulate(struct trapframe *frame);
84184257Slulfstatic int	handle_onfault(struct trapframe *frame);
85184257Slulfstatic void	syscall(struct trapframe *frame);
86184257Slulf
87185130Slulf#ifdef __powerpc64__
88184257Slulf       void	handle_kernel_slb_spill(int, register_t, register_t);
89184257Slulfstatic int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
90184257Slulfextern int	n_slbs;
91184257Slulf#endif
92184257Slulf
93184257Slulfint	setfault(faultbuf);		/* defined in locore.S */
94184257Slulf
95184257Slulf/* Why are these not defined in a header? */
96184257Slulfint	badaddr(void *, size_t);
97184257Slulfint	badaddr_read(void *, size_t, int *);
98184257Slulf
99184257Slulfstruct powerpc_exception {
100184257Slulf	u_int	vector;
101184257Slulf	char	*name;
102184257Slulf};
103184257Slulf
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * This is a hook which is initialised by the dtrace module
 * to handle traps which might occur during DTrace probe
 * execution.
 */
dtrace_trap_func_t	dtrace_trap_func;

dtrace_doubletrap_func_t	dtrace_doubletrap_func;

/*
 * This is a hook which is initialised by the systrace module
 * when it is loaded. This keeps the DTrace syscall provider
 * implementation opaque.
 */
systrace_probe_func_t	systrace_probe_func;

/*
 * These hooks are necessary for the pid and usdt providers.
 */
dtrace_pid_probe_ptr_t		dtrace_pid_probe_ptr;
dtrace_return_probe_ptr_t	dtrace_return_probe_ptr;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

131184257Slulfstatic struct powerpc_exception powerpc_exceptions[] = {
132184257Slulf	{ 0x0100, "system reset" },
133184257Slulf	{ 0x0200, "machine check" },
134184257Slulf	{ 0x0300, "data storage interrupt" },
135184257Slulf	{ 0x0380, "data segment exception" },
136184257Slulf	{ 0x0400, "instruction storage interrupt" },
137184257Slulf	{ 0x0480, "instruction segment exception" },
138184257Slulf	{ 0x0500, "external interrupt" },
139184257Slulf	{ 0x0600, "alignment" },
140184257Slulf	{ 0x0700, "program" },
141184257Slulf	{ 0x0800, "floating-point unavailable" },
142185128Slulf	{ 0x0900, "decrementer" },
143184257Slulf	{ 0x0c00, "system call" },
144184257Slulf	{ 0x0d00, "trace" },
145184257Slulf	{ 0x0e00, "floating-point assist" },
146184257Slulf	{ 0x0f00, "performance monitoring" },
147184257Slulf	{ 0x0f20, "altivec unavailable" },
148184257Slulf	{ 0x1000, "instruction tlb miss" },
149184257Slulf	{ 0x1100, "data load tlb miss" },
150184257Slulf	{ 0x1200, "data store tlb miss" },
151184257Slulf	{ 0x1300, "instruction breakpoint" },
152184257Slulf	{ 0x1400, "system management" },
153184257Slulf	{ 0x1600, "altivec assist" },
154184257Slulf	{ 0x1700, "thermal management" },
155184257Slulf	{ 0x2000, "run mode/trace" },
156184257Slulf	{ 0x3000, NULL }
157185130Slulf};
158184257Slulf
159184257Slulfstatic const char *
160184257Slulftrapname(u_int vector)
161185130Slulf{
162184257Slulf	struct	powerpc_exception *pe;
163184257Slulf
164184257Slulf	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
165184257Slulf		if (pe->vector == vector)
166184257Slulf			return (pe->name);
167184257Slulf	}
168184257Slulf
169184257Slulf	return ("unknown");
170184257Slulf}
171184257Slulf
172184257Slulfvoid
173184257Slulftrap(struct trapframe *frame)
174184257Slulf{
175184257Slulf	struct thread	*td;
176185130Slulf	struct proc	*p;
177184257Slulf#ifdef KDTRACE_HOOKS
178184257Slulf	uint32_t inst;
179185130Slulf#endif
180184257Slulf	int		sig, type, user;
181184257Slulf	u_int		ucode;
182184257Slulf	ksiginfo_t	ksi;
183184257Slulf
184184257Slulf	PCPU_INC(cnt.v_trap);
185184257Slulf
186184257Slulf	td = curthread;
187184257Slulf	p = td->td_proc;
188184257Slulf
189184257Slulf	type = ucode = frame->exc;
190184257Slulf	sig = 0;
191184257Slulf	user = frame->srr1 & PSL_PR;
192184257Slulf
193184257Slulf	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
194184257Slulf	    trapname(type), user ? "user" : "kernel");
195184257Slulf
196184257Slulf#ifdef KDTRACE_HOOKS
197184257Slulf	/*
198184257Slulf	 * A trap can occur while DTrace executes a probe. Before
199184257Slulf	 * executing the probe, DTrace blocks re-scheduling and sets
200184257Slulf	 * a flag in it's per-cpu flags to indicate that it doesn't
201184257Slulf	 * want to fault. On returning from the probe, the no-fault
202184257Slulf	 * flag is cleared and finally re-scheduling is enabled.
203184257Slulf	 *
204184257Slulf	 * If the DTrace kernel module has registered a trap handler,
205184257Slulf	 * call it and if it returns non-zero, assume that it has
206184257Slulf	 * handled the trap and modified the trap frame so that this
207184257Slulf	 * function can return normally.
208184257Slulf	 */
209184257Slulf	/*
210184257Slulf	 * XXXDTRACE: add pid probe handler here (if ever)
211184257Slulf	 */
212184257Slulf	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
213184257Slulf		return;
214184257Slulf#endif
215184257Slulf
216184257Slulf	if (user) {
217184257Slulf		td->td_pticks = 0;
218184257Slulf		td->td_frame = frame;
219184257Slulf		if (td->td_ucred != p->p_ucred)
220184257Slulf			cred_update_thread(td);
221184257Slulf
222184257Slulf		/* User Mode Traps */
223184257Slulf		switch (type) {
224		case EXC_RUNMODETRC:
225		case EXC_TRC:
226			frame->srr1 &= ~PSL_SE;
227			sig = SIGTRAP;
228			break;
229
230#ifdef __powerpc64__
231		case EXC_ISE:
232		case EXC_DSE:
233			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
234			    (type == EXC_ISE) ? frame->srr0 :
235			    frame->cpu.aim.dar) != 0)
236				sig = SIGSEGV;
237			break;
238#endif
239		case EXC_DSI:
240		case EXC_ISI:
241			sig = trap_pfault(frame, 1);
242			break;
243
244		case EXC_SC:
245			syscall(frame);
246			break;
247
248		case EXC_FPU:
249			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
250			    ("FPU already enabled for thread"));
251			enable_fpu(td);
252			break;
253
254		case EXC_VEC:
255			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
256			    ("Altivec already enabled for thread"));
257			enable_vec(td);
258			break;
259
260		case EXC_VECAST_G4:
261		case EXC_VECAST_G5:
262			/*
263			 * We get a VPU assist exception for IEEE mode
264			 * vector operations on denormalized floats.
265			 * Emulating this is a giant pain, so for now,
266			 * just switch off IEEE mode and treat them as
267			 * zero.
268			 */
269
270			save_vec(td);
271			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
272			enable_vec(td);
273			break;
274
275		case EXC_ALI:
276			if (fix_unaligned(td, frame) != 0)
277				sig = SIGBUS;
278			else
279				frame->srr0 += 4;
280			break;
281
282		case EXC_PGM:
283			/* Identify the trap reason */
284			if (frame->srr1 & EXC_PGM_TRAP) {
285#ifdef KDTRACE_HOOKS
286				inst = fuword32((const void *)frame->srr0);
287				if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) {
288					struct reg regs;
289					fill_regs(td, &regs);
290					(*dtrace_pid_probe_ptr)(&regs);
291					break;
292				}
293#endif
294 				sig = SIGTRAP;
295			} else if (ppc_instr_emulate(frame) == 0)
296				frame->srr0 += 4;
297			else
298				sig = SIGILL;
299			break;
300
301		default:
302			trap_fatal(frame);
303		}
304	} else {
305		/* Kernel Mode Traps */
306
307		KASSERT(cold || td->td_ucred != NULL,
308		    ("kernel trap doesn't have ucred"));
309		switch (type) {
310#ifdef KDTRACE_HOOKS
311		case EXC_PGM:
312			if (frame->srr1 & EXC_PGM_TRAP) {
313				if (*(uint32_t *)frame->srr0 == 0x7c810808) {
314					if (dtrace_invop_jump_addr != NULL) {
315						dtrace_invop_jump_addr(frame);
316						return;
317					}
318				}
319			}
320			break;
321#endif
322#ifdef __powerpc64__
323		case EXC_DSE:
324			if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
325				__asm __volatile ("slbmte %0, %1" ::
326					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
327					"r"(USER_SLB_SLBE));
328				return;
329			}
330			break;
331#endif
332		case EXC_DSI:
333			if (trap_pfault(frame, 0) == 0)
334 				return;
335			break;
336		case EXC_MCHK:
337			if (handle_onfault(frame))
338 				return;
339			break;
340		default:
341			break;
342		}
343		trap_fatal(frame);
344	}
345
346	if (sig != 0) {
347		if (p->p_sysent->sv_transtrap != NULL)
348			sig = (p->p_sysent->sv_transtrap)(sig, type);
349		ksiginfo_init_trap(&ksi);
350		ksi.ksi_signo = sig;
351		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
352		/* ksi.ksi_addr = ? */
353		ksi.ksi_trapno = type;
354		trapsignal(td, &ksi);
355	}
356
357	userret(td, frame);
358}
359
360static void
361trap_fatal(struct trapframe *frame)
362{
363
364	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
365#ifdef KDB
366	if ((debugger_on_panic || kdb_active) &&
367	    kdb_trap(frame->exc, 0, frame))
368		return;
369#endif
370	panic("%s trap", trapname(frame->exc));
371}
372
373static void
374printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
375{
376
377	printf("\n");
378	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
379	    user ? "user" : "kernel");
380	printf("\n");
381	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
382	switch (vector) {
383	case EXC_DSE:
384	case EXC_DSI:
385		printf("   virtual address = 0x%" PRIxPTR "\n",
386		    frame->cpu.aim.dar);
387		printf("   dsisr           = 0x%" PRIxPTR "\n",
388		    frame->cpu.aim.dsisr);
389		break;
390	case EXC_ISE:
391	case EXC_ISI:
392		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
393		break;
394	}
395	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
396	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
397	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
398	printf("   curthread       = %p\n", curthread);
399	if (curthread != NULL)
400		printf("          pid = %d, comm = %s\n",
401		    curthread->td_proc->p_pid, curthread->td_name);
402	printf("\n");
403}
404
405/*
406 * Handles a fatal fault when we have onfault state to recover.  Returns
407 * non-zero if there was onfault recovery state available.
408 */
409static int
410handle_onfault(struct trapframe *frame)
411{
412	struct		thread *td;
413	faultbuf	*fb;
414
415	td = curthread;
416	fb = td->td_pcb->pcb_onfault;
417	if (fb != NULL) {
418		frame->srr0 = (*fb)[0];
419		frame->fixreg[1] = (*fb)[1];
420		frame->fixreg[2] = (*fb)[2];
421		frame->fixreg[3] = 1;
422		frame->cr = (*fb)[3];
423		bcopy(&(*fb)[4], &frame->fixreg[13],
424		    19 * sizeof(register_t));
425		return (1);
426	}
427	return (0);
428}
429
430int
431cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
432{
433	struct proc *p;
434	struct trapframe *frame;
435	caddr_t	params;
436	size_t argsz;
437	int error, n, i;
438
439	p = td->td_proc;
440	frame = td->td_frame;
441
442	sa->code = frame->fixreg[0];
443	params = (caddr_t)(frame->fixreg + FIRSTARG);
444	n = NARGREG;
445
446	if (sa->code == SYS_syscall) {
447		/*
448		 * code is first argument,
449		 * followed by actual args.
450		 */
451		sa->code = *(register_t *) params;
452		params += sizeof(register_t);
453		n -= 1;
454	} else if (sa->code == SYS___syscall) {
455		/*
456		 * Like syscall, but code is a quad,
457		 * so as to maintain quad alignment
458		 * for the rest of the args.
459		 */
460		if (SV_PROC_FLAG(p, SV_ILP32)) {
461			params += sizeof(register_t);
462			sa->code = *(register_t *) params;
463			params += sizeof(register_t);
464			n -= 2;
465		} else {
466			sa->code = *(register_t *) params;
467			params += sizeof(register_t);
468			n -= 1;
469		}
470	}
471
472 	if (p->p_sysent->sv_mask)
473		sa->code &= p->p_sysent->sv_mask;
474	if (sa->code >= p->p_sysent->sv_size)
475		sa->callp = &p->p_sysent->sv_table[0];
476	else
477		sa->callp = &p->p_sysent->sv_table[sa->code];
478
479	sa->narg = sa->callp->sy_narg;
480
481	if (SV_PROC_FLAG(p, SV_ILP32)) {
482		argsz = sizeof(uint32_t);
483
484		for (i = 0; i < n; i++)
485			sa->args[i] = ((u_register_t *)(params))[i] &
486			    0xffffffff;
487	} else {
488		argsz = sizeof(uint64_t);
489
490		for (i = 0; i < n; i++)
491			sa->args[i] = ((u_register_t *)(params))[i];
492	}
493
494	if (sa->narg > n)
495		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
496			       (sa->narg - n) * argsz);
497	else
498		error = 0;
499
500#ifdef __powerpc64__
501	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
502		/* Expand the size of arguments copied from the stack */
503
504		for (i = sa->narg; i >= n; i--)
505			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
506	}
507#endif
508
509	if (error == 0) {
510		td->td_retval[0] = 0;
511		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
512	}
513	return (error);
514}
515
516#include "../../kern/subr_syscall.c"
517
518void
519syscall(struct trapframe *frame)
520{
521	struct thread *td;
522	struct syscall_args sa;
523	int error;
524
525	td = curthread;
526	td->td_frame = frame;
527
528#ifdef __powerpc64__
529	/*
530	 * Speculatively restore last user SLB segment, which we know is
531	 * invalid already, since we are likely to do copyin()/copyout().
532	 */
533	__asm __volatile ("slbmte %0, %1; isync" ::
534            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
535#endif
536
537	error = syscallenter(td, &sa);
538	syscallret(td, error, &sa);
539}
540
541#ifdef __powerpc64__
542/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
543void
544handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
545{
546	struct slb *slbcache;
547	uint64_t slbe, slbv;
548	uint64_t esid, addr;
549	int i;
550
551	addr = (type == EXC_ISE) ? srr0 : dar;
552	slbcache = PCPU_GET(slb);
553	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
554	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
555
556	/* See if the hardware flushed this somehow (can happen in LPARs) */
557	for (i = 0; i < n_slbs; i++)
558		if (slbcache[i].slbe == (slbe | (uint64_t)i))
559			return;
560
561	/* Not in the map, needs to actually be added */
562	slbv = kernel_va_to_slbv(addr);
563	if (slbcache[USER_SLB_SLOT].slbe == 0) {
564		for (i = 0; i < n_slbs; i++) {
565			if (i == USER_SLB_SLOT)
566				continue;
567			if (!(slbcache[i].slbe & SLBE_VALID))
568				goto fillkernslb;
569		}
570
571		if (i == n_slbs)
572			slbcache[USER_SLB_SLOT].slbe = 1;
573	}
574
575	/* Sacrifice a random SLB entry that is not the user entry */
576	i = mftb() % n_slbs;
577	if (i == USER_SLB_SLOT)
578		i = (i+1) % n_slbs;
579
580fillkernslb:
581	/* Write new entry */
582	slbcache[i].slbv = slbv;
583	slbcache[i].slbe = slbe | (uint64_t)i;
584
585	/* Trap handler will restore from cache on exit */
586}
587
588static int
589handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
590{
591	struct slb *user_entry;
592	uint64_t esid;
593	int i;
594
595	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
596
597	PMAP_LOCK(pm);
598	user_entry = user_va_to_slb_entry(pm, addr);
599
600	if (user_entry == NULL) {
601		/* allocate_vsid auto-spills it */
602		(void)allocate_user_vsid(pm, esid, 0);
603	} else {
604		/*
605		 * Check that another CPU has not already mapped this.
606		 * XXX: Per-thread SLB caches would be better.
607		 */
608		for (i = 0; i < pm->pm_slb_len; i++)
609			if (pm->pm_slb[i] == user_entry)
610				break;
611
612		if (i == pm->pm_slb_len)
613			slb_insert_user(pm, user_entry);
614	}
615	PMAP_UNLOCK(pm);
616
617	return (0);
618}
619#endif
620
621static int
622trap_pfault(struct trapframe *frame, int user)
623{
624	vm_offset_t	eva, va;
625	struct		thread *td;
626	struct		proc *p;
627	vm_map_t	map;
628	vm_prot_t	ftype;
629	int		rv;
630	register_t	user_sr;
631
632	td = curthread;
633	p = td->td_proc;
634	if (frame->exc == EXC_ISI) {
635		eva = frame->srr0;
636		ftype = VM_PROT_EXECUTE;
637		if (frame->srr1 & SRR1_ISI_PFAULT)
638			ftype |= VM_PROT_READ;
639	} else {
640		eva = frame->cpu.aim.dar;
641		if (frame->cpu.aim.dsisr & DSISR_STORE)
642			ftype = VM_PROT_WRITE;
643		else
644			ftype = VM_PROT_READ;
645	}
646
647	if (user) {
648		map = &p->p_vmspace->vm_map;
649	} else {
650		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
651			if (p->p_vmspace == NULL)
652				return (SIGSEGV);
653
654			map = &p->p_vmspace->vm_map;
655
656			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
657			eva &= ADDR_PIDX | ADDR_POFF;
658			eva |= user_sr << ADDR_SR_SHFT;
659		} else {
660			map = kernel_map;
661		}
662	}
663	va = trunc_page(eva);
664
665	if (map != kernel_map) {
666		/*
667		 * Keep swapout from messing with us during this
668		 *	critical time.
669		 */
670		PROC_LOCK(p);
671		++p->p_lock;
672		PROC_UNLOCK(p);
673
674		/* Fault in the user page: */
675		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
676
677		PROC_LOCK(p);
678		--p->p_lock;
679		PROC_UNLOCK(p);
680		/*
681		 * XXXDTRACE: add dtrace_doubletrap_func here?
682		 */
683	} else {
684		/*
685		 * Don't have to worry about process locking or stacks in the
686		 * kernel.
687		 */
688		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
689	}
690
691	if (rv == KERN_SUCCESS)
692		return (0);
693
694	if (!user && handle_onfault(frame))
695		return (0);
696
697	return (SIGSEGV);
698}
699
700int
701badaddr(void *addr, size_t size)
702{
703	return (badaddr_read(addr, size, NULL));
704}
705
706int
707badaddr_read(void *addr, size_t size, int *rptr)
708{
709	struct thread	*td;
710	faultbuf	env;
711	int		x;
712
713	/* Get rid of any stale machine checks that have been waiting.  */
714	__asm __volatile ("sync; isync");
715
716	td = curthread;
717
718	if (setfault(env)) {
719		td->td_pcb->pcb_onfault = 0;
720		__asm __volatile ("sync");
721		return 1;
722	}
723
724	__asm __volatile ("sync");
725
726	switch (size) {
727	case 1:
728		x = *(volatile int8_t *)addr;
729		break;
730	case 2:
731		x = *(volatile int16_t *)addr;
732		break;
733	case 4:
734		x = *(volatile int32_t *)addr;
735		break;
736	default:
737		panic("badaddr: invalid size (%zd)", size);
738	}
739
740	/* Make sure we took the machine check, if we caused one. */
741	__asm __volatile ("sync; isync");
742
743	td->td_pcb->pcb_onfault = 0;
744	__asm __volatile ("sync");	/* To be sure. */
745
746	/* Use the value to avoid reorder. */
747	if (rptr)
748		*rptr = x;
749
750	return (0);
751}
752
753/*
754 * For now, this only deals with the particular unaligned access case
755 * that gcc tends to generate.  Eventually it should handle all of the
756 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
757 */
758
759static int
760fix_unaligned(struct thread *td, struct trapframe *frame)
761{
762	struct thread	*fputhread;
763	int		indicator, reg;
764	double		*fpr;
765
766	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);
767
768	switch (indicator) {
769	case EXC_ALI_LFD:
770	case EXC_ALI_STFD:
771		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
772		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
773		fputhread = PCPU_GET(fputhread);
774
775		/* Juggle the FPU to ensure that we've initialized
776		 * the FPRs, and that their current state is in
777		 * the PCB.
778		 */
779		if (fputhread != td) {
780			if (fputhread)
781				save_fpu(fputhread);
782			enable_fpu(td);
783		}
784		save_fpu(td);
785
786		if (indicator == EXC_ALI_LFD) {
787			if (copyin((void *)frame->cpu.aim.dar, fpr,
788			    sizeof(double)) != 0)
789				return -1;
790			enable_fpu(td);
791		} else {
792			if (copyout(fpr, (void *)frame->cpu.aim.dar,
793			    sizeof(double)) != 0)
794				return -1;
795		}
796		return 0;
797		break;
798	}
799
800	return -1;
801}
802
803static int
804ppc_instr_emulate(struct trapframe *frame)
805{
806	uint32_t instr;
807	int reg;
808
809	instr = fuword32((void *)frame->srr0);
810
811	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
812		reg = (instr & ~0xfc1fffff) >> 21;
813		frame->fixreg[reg] = mfpvr();
814		return (0);
815	}
816
817	return (-1);
818}
819
820