/*-
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/sparc64/sparc64/machdep.c 286055 2015-07-30 02:06:29Z marius $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/cons.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/interrupt.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timetc.h>
#include <sys/ucontext.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <ddb/ddb.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cmt.h>
#include <machine/cpu.h>
#include <machine/fireplane.h>
#include <machine/fp.h>
#include <machine/fsr.h>
#include <machine/intr_machdep.h>
#include <machine/jbus.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_machdep.h>
#include <machine/ofw_mem.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/pstate.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/tlb.h>
#include <machine/tstate.h>
#include <machine/upa.h>
#include <machine/ver.h>

typedef int ofw_vec_t(void *);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

int dtlb_slots;
int itlb_slots;
struct tlb_entry *kernel_tlbs;
int kernel_tlb_slots;

int cold = 1;
long Maxmem;
long realmem;

void *dpcpu0;
char pcpu0[PCPU_PAGES * PAGE_SIZE];
struct trapframe frame0;

vm_offset_t kstack0;
vm_paddr_t kstack0_phys;

struct kva_md_info kmi;

u_long ofw_vec;
u_long ofw_tba;
u_int tba_taken_over;

char sparc64_model[32];

static int cpu_use_vis = 1;

cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;

static phandle_t find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl);
void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
    ofw_vec_t *vec);
static void sparc64_shutdown_final(void *dummy, int howto);

static void cpu_startup(void *arg);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

CTASSERT((1 << INT_SHIFT) == sizeof(int));
CTASSERT((1 << PTR_SHIFT) == sizeof(char *));

CTASSERT(sizeof(struct reg) == 256);
CTASSERT(sizeof(struct fpreg) == 272);
CTASSERT(sizeof(struct __mcontext) == 512);

CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));

CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));

static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory  = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	realmem = (long)physsz / PAGE_SIZE;

	vm_ksubmap_init(&kmi);

	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}

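/*
 * Machine-dependent per-CPU structure initialization: link the embedded
 * interrupt request pool entries onto the per-CPU free list.
 */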
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
	struct intr_request *ir;
	int i;

	pcpu->pc_irtail = &pcpu->pc_irhead;
	for (i = 0; i < IR_FREE; i++) {
		ir = &pcpu->pc_irpool[i];
		ir->ir_next = pcpu->pc_irfree;
		pcpu->pc_irfree = ir;
	}
}

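/*
 * Raise the processor interrupt level to PIL_TICK on the outermost
 * spinlock acquisition and remember the previous level; nesting is
 * tracked via md_spinlock_count.
 */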
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_pil = pil;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

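/*
 * Drop the spinlock nesting level and restore the saved %pil once the
 * outermost spinlock is released.
 */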
void
spinlock_exit(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	critical_exit();
	pil = td->td_md.md_saved_pil;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		wrpr(pil, pil, 0);
}

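/*
 * Recursively walk the OFW device tree looking for the "cpu" node whose
 * ID property matches the given boot processor ID.
 */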
static phandle_t
find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t cpuid;

	for (; node != 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			child = find_bsp(child, bspid, cpu_impl);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_cpuid_prop(cpu_impl), &cpuid,
			    sizeof(cpuid)) <= 0)
				continue;
			if (cpuid == bspid)
				return (node);
		}
	}
	return (0);
}

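/*
 * Return the name of the OFW property that holds the CPU ID for the given
 * CPU implementation.
 */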
const char *
cpu_cpuid_prop(u_int cpu_impl)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

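/*
 * Read the module/agent ID of the current CPU from the implementation-
 * specific bus configuration register.
 */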
uint32_t
cpu_get_mid(u_int cpu_impl)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

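/*
 * Machine-dependent early boot initialization, entered from the startup
 * assembly code with the loader metadata pointer and the Open Firmware
 * entry vector as arguments.
 */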
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	char *env;
	struct pcpu *pc;
	vm_offset_t end;
	vm_offset_t va;
	caddr_t kmdp;
	phandle_t root;
	u_int cpu_impl;

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of CPU we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Do CPU-specific initialization.
	 */
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init(cpu_impl);
	else if (cpu_impl == CPU_IMPL_SPARC64V)
		zeus_init(cpu_impl);

	/*
	 * Clear (S)TICK timer (including NPT).
	 */
	tick_clear(cpu_impl);

	/*
	 * UltraSPARC II[e,i] based systems come up with the tick interrupt
	 * enabled and a handler that resets the tick counter, causing DELAY()
	 * to not work properly when used early in boot.
	 * UltraSPARC III based systems come up with the system tick interrupt
	 * enabled, causing an interrupt storm on startup since these
	 * interrupts are not handled.
	 */
	tick_stop(cpu_impl);

	/*
	 * Set up Open Firmware entry points.
	 */
	ofw_tba = rdpr(tba);
	ofw_vec = (u_long)vec;

	/*
	 * Parse metadata if present and fetch parameters.  Must be before the
	 * console is inited so cninit() gets the right value of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_install(OFW_STD_DIRECT, 0);
	OF_init(ofw_entry);

	/*
	 * Prime our per-CPU data page for use.  Note, we are using it for
	 * our stack, so don't pass the real size (PAGE_SIZE) to pcpu_init
	 * or it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = cpu_get_mid(cpu_impl);
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Determine the OFW node and frequency of the BSP (and ensure the
	 * BSP is in the device tree in the first place).
	 */
	root = OF_peer(0);
	pc->pc_node = find_bsp(root, pc->pc_mid, cpu_impl);
	if (pc->pc_node == 0)
		OF_panic("%s: cannot find boot CPU node", __func__);
	if (OF_getprop(pc->pc_node, "clock-frequency", &pc->pc_clock,
	    sizeof(pc->pc_clock)) <= 0)
		OF_panic("%s: cannot determine boot CPU clock", __func__);

	/*
	 * Panic if there is no metadata.  Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL || end == 0 ||
	    kernel_tlb_slots == 0 || kernel_tlbs == NULL)
		OF_panic("%s: missing loader metadata.\nThis probably means "
		    "you are not using loader(8).", __func__);

	/*
	 * Work around the broken loader behavior of not demapping no
	 * longer used kernel TLB slots when unloading the kernel or
	 * modules.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		if (bootverbose)
			OF_printf("demapping unused kernel TLB slot "
			    "(va %#lx - %#lx)\n", va, va + PAGE_SIZE_4M - 1);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		kernel_tlb_slots--;
	}

	/*
	 * Determine the TLB slot maxima, which are expected to be
	 * equal across all CPUs.
	 * NB: for cheetah-class CPUs, these properties only refer
	 * to the t16s.
	 */
	if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
	    sizeof(dtlb_slots)) == -1)
		OF_panic("%s: cannot determine number of dTLB slots",
		    __func__);
	if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
	    sizeof(itlb_slots)) == -1)
		OF_panic("%s: cannot determine number of iTLB slots",
		    __func__);

	/*
	 * Initialize and enable the caches.  Note that this may include
	 * applying workarounds.
	 */
	cache_init(pc);
	cache_enable(cpu_impl);
	uma_set_align(pc->pc_cache.dc_linesize - 1);

	cpu_block_copy = bcopy;
	cpu_block_zero = bzero;
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		switch (cpu_impl) {
		case CPU_IMPL_SPARC64:
		case CPU_IMPL_ULTRASPARCI:
		case CPU_IMPL_ULTRASPARCII:
		case CPU_IMPL_ULTRASPARCIIi:
		case CPU_IMPL_ULTRASPARCIIe:
		case CPU_IMPL_ULTRASPARCIII:	/* NB: we've disabled P$. */
		case CPU_IMPL_ULTRASPARCIIIp:
		case CPU_IMPL_ULTRASPARCIIIi:
		case CPU_IMPL_ULTRASPARCIV:
		case CPU_IMPL_ULTRASPARCIVp:
		case CPU_IMPL_ULTRASPARCIIIip:
			cpu_block_copy = spitfire_block_copy;
			cpu_block_zero = spitfire_block_zero;
			break;
		case CPU_IMPL_SPARC64V:
			cpu_block_copy = zeus_block_copy;
			cpu_block_zero = zeus_block_zero;
			break;
		}
	}

#ifdef SMP
	mp_init();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap(cpu_impl);

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0, set kstack0, frame0, curthread and curpcb.
	 */
	proc_linkup0(&proc0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Take over the trap table via the PROM.  Using the PROM for this
	 * is necessary in order to set obp-control-relinquished to true
	 * within the PROM so obtaining /virtual-memory/translations doesn't
	 * trigger a fatal reset error or worse things further down the road.
	 * XXX it should be possible to use this solely instead of writing
	 * %tba in cpu_setregs().  Doing so causes a hang however.
	 *
	 * NB: the low-level console drivers require a working DELAY() and
	 * some compiler optimizations may cause the curthread accesses of
	 * mutex(9) to be factored out even if the latter aren't actually
	 * called.  Both of these require PCPU_REG to be set.  However, we
	 * can't set PCPU_REG without also taking over the trap table or the
	 * firmware will overwrite it.
	 */
	sun4u_set_traptable(tl0_base);

	/*
	 * Initialize the dynamic per-CPU area for the BSP and the message
	 * buffer (after setting the trap table).
	 */
	dpcpu_init(dpcpu0, 0);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Initialize mutexes.
	 */
	mutex_init();

	/*
	 * Initialize console now that we have a reasonable set of system
	 * services.
	 */
	cninit();

	/*
	 * Finish the interrupt initialization now that mutexes work and
	 * enable them.
	 */
	intr_init2();
	wrpr(pil, 0, 0);
	wrpr(pstate, 0, PSTATE_KERNEL);

	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

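/*
 * Send a signal to the current thread: save the user context, build a
 * signal frame on the appropriate stack and arrange for the handler to be
 * invoked via the process' signal trampoline.
 */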
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	u_long sp;
	int oonstack;
	int sig;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	fp = (struct frame *)sfp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	tf->tf_out[4] = (register_t)catcher;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		tf->tf_out[1] = (register_t)&sfp->sf_si;

		/* Fill in POSIX parts. */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		tf->tf_out[1] = ksi->ksi_code;
		tf->tf_out[3] = (register_t)ksi->ksi_addr;
	}

	/* Copy the sigframe out to the user's stack. */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *ucp;
};
#endif

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	mcontext_t *mc;
	ucontext_t uc;
	int error;

	p = td->td_proc;
	if (rwindow_save(td)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	mc = &uc.uc_mcontext;
	error = set_mcontext(td, mc);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
	    td, mc->_mc_tpc, mc->_mc_sp, mc->_mc_tstate);
	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_pc = tf->tf_tpc;
	pcb->pcb_sp = tf->tf_sp;
}

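/*
 * Export the machine context of a thread, including the floating-point
 * state when it is valid, from its trapframe and PCB.
 */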
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	/*
	 * Copy the registers which will be restored by tl0_ret() from the
	 * trapframe.
	 * Note that we skip %g7 which is used as the userland TLS register
	 * and %wstate.
	 */
	mc->_mc_flags = _MC_VERSION;
	mc->mc_global[1] = tf->tf_global[1];
	mc->mc_global[2] = tf->tf_global[2];
	mc->mc_global[3] = tf->tf_global[3];
	mc->mc_global[4] = tf->tf_global[4];
	mc->mc_global[5] = tf->tf_global[5];
	mc->mc_global[6] = tf->tf_global[6];
	if (flags & GET_MC_CLEAR_RET) {
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	} else {
		mc->mc_out[0] = tf->tf_out[0];
		mc->mc_out[1] = tf->tf_out[1];
	}
	mc->mc_out[2] = tf->tf_out[2];
	mc->mc_out[3] = tf->tf_out[3];
	mc->mc_out[4] = tf->tf_out[4];
	mc->mc_out[5] = tf->tf_out[5];
	mc->mc_out[6] = tf->tf_out[6];
	mc->mc_out[7] = tf->tf_out[7];
	mc->_mc_fprs = tf->tf_fprs;
	mc->_mc_fsr = tf->tf_fsr;
	mc->_mc_gsr = tf->tf_gsr;
	mc->_mc_tnpc = tf->tf_tnpc;
	mc->_mc_tpc = tf->tf_tpc;
	mc->_mc_tstate = tf->tf_tstate;
	mc->_mc_y = tf->tf_y;
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->_mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}

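/*
 * Install a machine context into a thread's trapframe after validating
 * the version and the privileged %tstate bits.
 */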
int
set_mcontext(struct thread *td, mcontext_t *mc)
{
	struct trapframe *tf;
	struct pcb *pcb;

	if (!TSTATE_SECURE(mc->_mc_tstate) ||
	    (mc->_mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
		return (EINVAL);
	tf = td->td_frame;
	pcb = td->td_pcb;
	/* Make sure the windows are spilled first. */
	flushw();
	/*
	 * Copy the registers which will be restored by tl0_ret() to the
	 * trapframe.
	 * Note that we skip %g7 which is used as the userland TLS register
	 * and %wstate.
	 */
	tf->tf_global[1] = mc->mc_global[1];
	tf->tf_global[2] = mc->mc_global[2];
	tf->tf_global[3] = mc->mc_global[3];
	tf->tf_global[4] = mc->mc_global[4];
	tf->tf_global[5] = mc->mc_global[5];
	tf->tf_global[6] = mc->mc_global[6];
	tf->tf_out[0] = mc->mc_out[0];
	tf->tf_out[1] = mc->mc_out[1];
	tf->tf_out[2] = mc->mc_out[2];
	tf->tf_out[3] = mc->mc_out[3];
	tf->tf_out[4] = mc->mc_out[4];
	tf->tf_out[5] = mc->mc_out[5];
	tf->tf_out[6] = mc->mc_out[6];
	tf->tf_out[7] = mc->mc_out[7];
	tf->tf_fprs = mc->_mc_fprs;
	tf->tf_fsr = mc->_mc_fsr;
	tf->tf_gsr = mc->_mc_gsr;
	tf->tf_tnpc = mc->_mc_tnpc;
	tf->tf_tpc = mc->_mc_tpc;
	tf->tf_tstate = mc->_mc_tstate;
	tf->tf_y = mc->_mc_y;
	if ((mc->_mc_fprs & FPRS_FEF) != 0) {
		tf->tf_fprs = 0;
		bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
		pcb->pcb_flags |= PCB_FEF;
	}
	return (0);
}

/*
 * Exit the kernel and execute a firmware call that will not return, as
 * specified by the arguments.
 */
void
cpu_shutdown(void *args)
{

#ifdef SMP
	cpu_mp_shutdown();
#endif
	ofw_exit(args);
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);
	*rate = pc->pc_clock;
	return (0);
}

/*
 * Duplicate OF_exit() with a different firmware call function that restores
 * the trap table, otherwise a RED state exception is triggered in at least
 * some firmware versions.
 */
void
cpu_halt(void)
{
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"exit",
		0,
		0
	};

	cpu_shutdown(&args);
}

static void
sparc64_shutdown_final(void *dummy, int howto)
{
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"SUNW,power-off",
		0,
		0
	};

	/* Turn the power off? */
	if ((howto & RB_POWEROFF) != 0)
		cpu_shutdown(&args);
	/* In case of halt, return to the firmware. */
	if ((howto & RB_HALT) != 0)
		cpu_halt();
}

void
cpu_idle(int busy)
{

	/* Insert code to halt (until next interrupt) for the idle loop. */
}

int
cpu_idle_wakeup(int cpu)
{

	return (1);
}

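/* Set the PC (and the corresponding nPC) of a thread being debugged. */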
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_tpc = addr;
	td->td_frame->tf_tnpc = addr + 4;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}

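/*
 * Reset the register state and PCB of a thread for a freshly exec'd image,
 * leaving the entry point in %tpc and the initial stack pointer in %o6.
 */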
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	struct pcb *pcb;
	struct proc *p;
	u_long sp;

	/* XXX no cpu_exec */
	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if (p->p_md.md_utrap != NULL) {
		utrap_free(p->p_md.md_utrap);
		p->p_md.md_utrap = NULL;
	}

	pcb = td->td_pcb;
	tf = td->td_frame;
	sp = rounddown(stack, 16);
	bzero(pcb, sizeof(*pcb));
	bzero(tf, sizeof(*tf));
	tf->tf_out[0] = stack;
	tf->tf_out[3] = p->p_sysent->sv_psstrings;
	tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
	tf->tf_tnpc = imgp->entry_addr + 4;
	tf->tf_tpc = imgp->entry_addr;
	/*
	 * While we could adhere to the memory model indicated in the ELF
	 * header, it turns out that just always using TSO performs best.
	 */
	tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;

	td->td_retval[0] = tf->tf_out[0];
	td->td_retval[1] = tf->tf_out[1];
}

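/*
 * The following routines fetch and store a thread's register sets;
 * debug registers are not implemented on this platform.
 */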
int
fill_regs(struct thread *td, struct reg *regs)
{

	bcopy(td->td_frame, regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	pcb = td->td_pcb;
	tf = td->td_frame;
	bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
	fpregs->fr_fsr = tf->tf_fsr;
	fpregs->fr_gsr = tf->tf_gsr;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	pcb = td->td_pcb;
	tf = td->td_frame;
	tf->tf_fprs &= ~FPRS_FEF;
	bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
	tf->tf_fsr = fpregs->fr_fsr;
	tf->tf_gsr = fpregs->fr_gsr;
	return (0);
}

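/*
 * Reference-counted management of the per-process user trap handler table.
 */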
struct md_utrap *
utrap_alloc(void)
{
	struct md_utrap *ut;

	ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
	ut->ut_refcnt = 1;
	return (ut);
}

void
utrap_free(struct md_utrap *ut)
{
	int refcnt;

	if (ut == NULL)
		return;
	mtx_pool_lock(mtxpool_sleep, ut);
	ut->ut_refcnt--;
	refcnt = ut->ut_refcnt;
	mtx_pool_unlock(mtxpool_sleep, ut);
	if (refcnt == 0)
		free(ut, M_SUBPROC);
}

struct md_utrap *
utrap_hold(struct md_utrap *ut)
{

	if (ut == NULL)
		return (NULL);
	mtx_pool_lock(mtxpool_sleep, ut);
	ut->ut_refcnt++;
	mtx_pool_unlock(mtxpool_sleep, ut);
	return (ut);
}