/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/trap.c 266005 2014-05-14 04:57:55Z ian $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void	handle_kernel_slb_spill(int, register_t, register_t);
static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int	n_slbs;
#endif

int	setfault(faultbuf);		/* defined in locore.S */

/* Why are these not defined in a header? */
int	badaddr(void *, size_t);
int	badaddr_read(void *, size_t, int *);

struct powerpc_exception {
	u_int	vector;
	char	*name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * This is a hook which is initialised by the dtrace module
 * to handle traps which might occur during DTrace probe
 * execution.
 */
dtrace_trap_func_t	dtrace_trap_func;

dtrace_doubletrap_func_t	dtrace_doubletrap_func;

/*
 * This is a hook which is initialised by the systrace module
 * when it is loaded. This keeps the DTrace syscall provider
 * implementation opaque.
 */
systrace_probe_func_t	systrace_probe_func;

/*
 * These hooks are necessary for the pid and usdt providers.
 */
dtrace_pid_probe_ptr_t		dtrace_pid_probe_ptr;
dtrace_return_probe_ptr_t	dtrace_return_probe_ptr;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
	{ 0x0100, "system reset" },
	{ 0x0200, "machine check" },
	{ 0x0300, "data storage interrupt" },
	{ 0x0380, "data segment exception" },
	{ 0x0400, "instruction storage interrupt" },
	{ 0x0480, "instruction segment exception" },
	{ 0x0500, "external interrupt" },
	{ 0x0600, "alignment" },
	{ 0x0700, "program" },
	{ 0x0800, "floating-point unavailable" },
	{ 0x0900, "decrementer" },
	{ 0x0c00, "system call" },
	{ 0x0d00, "trace" },
	{ 0x0e00, "floating-point assist" },
	{ 0x0f00, "performance monitoring" },
	{ 0x0f20, "altivec unavailable" },
	{ 0x1000, "instruction tlb miss" },
	{ 0x1100, "data load tlb miss" },
	{ 0x1200, "data store tlb miss" },
	{ 0x1300, "instruction breakpoint" },
	{ 0x1400, "system management" },
	{ 0x1600, "altivec assist" },
	{ 0x1700, "thermal management" },
	{ 0x2000, "run mode/trace" },
	{ 0x3000, NULL }
};

static const char *
trapname(u_int vector)
{
	struct	powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	/*
	 * XXXDTRACE: add pid probe handler here (if ever)
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 :
			    frame->cpu.aim.dar) != 0)
				sig = SIGSEGV;
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0)
				sig = SIGBUS;
			else
				frame->srr0 += 4;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame->srr1 & EXC_PGM_TRAP) {
#ifdef KDTRACE_HOOKS
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
				sig = SIGTRAP;
			} else {
				sig = ppc_instr_emulate(frame, td->td_pcb);
			}
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == 0x7c810808) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}
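
/*
 * Summary of the dispatch above: user-mode traps are either handled in
 * place (lazy FPU/AltiVec enable, page faults, system calls, alignment
 * fixups) or converted into a signal through the ksiginfo/trapsignal
 * path; kernel-mode traps must be recovered (onfault state, user-segment
 * SLB reload, resolvable page fault) or they fall through to trap_fatal()
 * and panic.
 */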

static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if ((debugger_on_panic || kdb_active) &&
	    kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s trap", trapname(frame->exc));
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
		printf("   virtual address = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dar);
		printf("   dsisr           = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dsisr);
		break;
	case EXC_ISE:
	case EXC_ISI:
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct		thread *td;
	faultbuf	*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->fixreg[3] = 1;
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}
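
/*
 * Sketch of the onfault protocol (illustrative only; badaddr_read()
 * below is the in-file user).  setfault(), in locore.S, stashes srr0,
 * r1, r2, cr and r13-r31 in a faultbuf and points pcb_onfault at it:
 *
 *	faultbuf env;
 *
 *	if (setfault(env)) {
 *		td->td_pcb->pcb_onfault = 0;
 *		return (1);		// the access faulted
 *	}
 *	... touch the possibly-bad address ...
 *	td->td_pcb->pcb_onfault = 0;
 *
 * When a fault is taken, handle_onfault() restores those registers from
 * the faultbuf and forces r3 to 1 (the fixreg[3] assignment above), so
 * the recorded setfault() call site appears to return non-zero.
 */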

int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	struct trapframe *frame;
	caddr_t	params;
	size_t argsz;
	int error, n, i;

	p = td->td_proc;
	frame = td->td_frame;

	sa->code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (sa->code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		sa->code = *(register_t *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			params += sizeof(register_t);
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 2;
		} else {
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 1;
		}
	}

	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	if (SV_PROC_FLAG(p, SV_ILP32)) {
		argsz = sizeof(uint32_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i] &
			    0xffffffff;
	} else {
		argsz = sizeof(uint64_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i];
	}

	if (sa->narg > n)
		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
			       (sa->narg - n) * argsz);
	else
		error = 0;

#ifdef __powerpc64__
	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
		/* Expand the size of arguments copied from the stack */

		for (i = sa->narg; i >= n; i--)
			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
	}
#endif

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
	}
	return (error);
}
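
/*
 * Argument convention handled above, for reference: the system call
 * number arrives in r0 and the first NARGREG arguments in the registers
 * starting at FIRSTARG (r3 and up); anything beyond that is copied from
 * the caller's stack via MOREARGS(r1).  The SYS_syscall/SYS___syscall
 * branches cover the indirect forms, e.g.
 *
 *	syscall(SYS_write, fd, buf, nbytes);
 *
 * where the real code is taken out of the first argument slot (one
 * register, or two slots for the quad-aligned SYS___syscall case under
 * ILP32) and the number of register arguments left for the target
 * system call shrinks accordingly.
 */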

#include "../../kern/subr_syscall.c"

void
syscall(struct trapframe *frame)
{
	struct thread *td;
	struct syscall_args sa;
	int error;

	td = curthread;
	td->td_frame = frame;

#ifdef __powerpc64__
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif
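	/*
	 * This is the same slbmte pattern as the kernel-mode EXC_DSE case
	 * in trap(); doing it here, before syscallenter(), spares the
	 * copyin()/copyout() of syscall arguments an immediate
	 * user-segment SLB fault.
	 */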

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}
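
/*
 * Replacement policy used above: the faulting ESID is first looked up in
 * the per-CPU SLB cache (a hypervisor may have flushed the entry in an
 * LPAR); when the user slot is unoccupied a free (invalid) slot is
 * preferred, and otherwise a pseudo-random victim is picked from the
 * timebase with mftb() % n_slbs, skipping USER_SLB_SLOT so the user
 * segment mapping is never evicted here.
 */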

static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif
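
/*
 * The user-mode counterpart above either allocates a fresh VSID for the
 * faulting ESID (allocate_user_vsid() spills it into the pmap's SLB
 * cache itself) or, when another CPU has already created the mapping,
 * inserts the existing entry into this pmap's cache with
 * slb_insert_user().  It never reports failure, so the SIGSEGV arm of
 * the EXC_ISE/EXC_DSE case in trap() is not reached from this path.
 */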

static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
	register_t	user_sr;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		eva = frame->cpu.aim.dar;
		if (frame->cpu.aim.dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		map = &p->p_vmspace->vm_map;
	} else {
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}
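
/*
 * A worked view of the kernel-mode user window handled above: the copy
 * routines access user memory through the dedicated USER_ADDR segment,
 * so when a kernel fault lands in that segment the effective address is
 * rewritten onto the real user segment before calling vm_fault():
 *
 *	eva = (eva & (ADDR_PIDX | ADDR_POFF)) |
 *	    (td->td_pcb->pcb_cpu.aim.usr_segm << ADDR_SR_SHFT);
 *
 * i.e. the page-index and offset bits are kept and the segment bits are
 * replaced with the segment recorded in the pcb by those routines.
 */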

int
badaddr(void *addr, size_t size)
{
	return (badaddr_read(addr, size, NULL));
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct thread	*td;
	faultbuf	env;
	int		x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	td = curthread;

	if (setfault(env)) {
		td->td_pcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%zd)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	td->td_pcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return (0);
}
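
/*
 * Illustrative use of the probe above (hypothetical caller, not from
 * this file):
 *
 *	int val;
 *
 *	if (badaddr_read((void *)va, 4, &val) != 0)
 *		return (ENXIO);		// read machine-checked
 *
 * badaddr() is the same probe when the value read is not needed.  Both
 * return non-zero if touching the address raised a machine check, using
 * the setfault()/handle_onfault() recovery described earlier.
 */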

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->cpu.aim.dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->cpu.aim.dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return 0;
		break;
	}

	return -1;
}
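
/*
 * Note on the decode above: the alignment interrupt leaves an opcode
 * indicator and the source/target register number in DSISR, extracted
 * here with EXC_ALI_OPCODE_INDICATOR() and EXC_ALI_RST().  Only the
 * lfd/stfd forms are emulated, by bouncing the misaligned double
 * through the thread's saved FPR with copyin()/copyout(); on success
 * the EXC_ALI case in trap() steps srr0 past the faulting instruction,
 * and any other opcode returns -1, which becomes SIGBUS.
 */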