/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/machdep.c 268199 2014-07-02 23:37:14Z marcel $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/efi.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/intr.h>
#include <machine/kdb.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sal.h>
#include <machine/sigframe.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/unwind.h>
#include <machine/vmparam.h>

/*
 * For atomicity reasons, we demand that pc_curthread is the first
 * field in struct pcpu. It allows us to read the pointer with a
 * single atomic instruction:
 *	ld8 %curthread = [r13]
 * Otherwise we would first have to calculate the load address,
 * store it in a temporary register and only then do the load:
 *	add %temp = %offsetof(struct pcpu), r13
 *	ld8 %curthread = [%temp]
 * A context switch in between the add and the ld8 could have the
 * thread migrate to a different core. In that case, %curthread
 * would be the thread running on the original core and not actually
 * the current thread.
 */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
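
/*
 * The CTASSERT makes any change to this layout a compile-time error,
 * protecting the assembly code that relies on loading curthread
 * directly through r13.
 */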

static SYSCTL_NODE(_hw, OID_AUTO, freq, CTLFLAG_RD, 0, "");
static SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RD, 0, "");

static u_int bus_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, bus, CTLFLAG_RD, &bus_freq, 0,
    "Bus clock frequency");

static u_int cpu_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, cpu, CTLFLAG_RD, &cpu_freq, 0,
    "CPU clock frequency");

static u_int itc_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0,
    "ITC frequency");

int cold = 1;

struct bootinfo *bootinfo;

struct pcpu pcpu0;

extern u_int64_t kernel_text[], _end[];

extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

struct fpswa_iface *fpswa_iface;

vm_size_t ia64_pal_size;
vm_paddr_t ia64_pal_base;
vm_offset_t ia64_port_base;

u_int64_t ia64_lapic_addr = PAL_PIB_DEFAULT_ADDR;

struct ia64_pib *ia64_pib;

static int ia64_sync_icache_needed;

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

struct msgbuf *msgbufp = NULL;

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(sbintime_t) = NULL;

struct kva_md_info kmi;

#define	Mhz	1000000L
#define	Ghz	(1000L*Mhz)

static void
identifycpu(void)
{
	char vendor[17];
	char *family_name, *model_name;
	u_int64_t features, tmp;
	int number, revision, model, family, archrev;

	/*
	 * Assumes little-endian.
	 */
	*(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
	*(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
	vendor[16] = '\0';

	tmp = ia64_get_cpuid(3);
	number = (tmp >> 0) & 0xff;
	revision = (tmp >> 8) & 0xff;
	model = (tmp >> 16) & 0xff;
	family = (tmp >> 24) & 0xff;
	archrev = (tmp >> 32) & 0xff;

	family_name = model_name = "unknown";
	switch (family) {
	case 0x07:
		family_name = "Itanium";
		model_name = "Merced";
		break;
	case 0x1f:
		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "McKinley";
			break;
		case 0x01:
			/*
			 * Deerfield is a low-voltage variant based on the
			 * Madison core. We need circumstantial evidence
			 * (i.e. the clock frequency) to identify those.
			 * Allow for roughly 1% error margin.
			 */
			if (cpu_freq > 990 && cpu_freq < 1010)
				model_name = "Deerfield";
			else
				model_name = "Madison";
			break;
		case 0x02:
			model_name = "Madison II";
			break;
		}
		break;
	case 0x20:
		ia64_sync_icache_needed = 1;

		family_name = "Itanium 2";
		switch (model) {
		case 0x00:
			model_name = "Montecito";
			break;
		case 0x01:
			model_name = "Montvale";
			break;
		}
		break;
	}
	snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
	snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);

	features = ia64_get_cpuid(4);

	printf("CPU: %s (", model_name);
	if (cpu_freq)
		printf("%u MHz ", cpu_freq);
	printf("%s)\n", family_name);
	printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
	printf("  Features = 0x%b\n", (u_int32_t) features,
	    "\020"
	    "\001LB"	/* long branch (brl) instruction. */
	    "\002SD"	/* Spontaneous deferral. */
	    "\003AO"	/* 16-byte atomic operations (ld, st, cmpxchg). */ );
}

static void
cpu_startup(void *dummy)
{
	char nodename[16];
	struct pcpu *pc;
	struct pcpu_stats *pcs;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	identifycpu();

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ld (%ld MB)\n", ptoa(realmem),
	    ptoa(realmem) / 1048576);

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	if (fpswa_iface == NULL)
		printf("Warning: no FPSWA package supplied\n");
	else
		printf("FPSWA Revision = 0x%lx, Entry = %p\n",
		    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	/*
	 * Traverse the MADT to discover IOSAPIC and Local SAPIC
	 * information.
	 */
	ia64_probe_sapics();
	ia64_pib = pmap_mapdev(ia64_lapic_addr, sizeof(*ia64_pib));

	ia64_mca_init();

	/*
	 * Create sysctl tree for per-CPU information.
	 */
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
		sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
		pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_machdep_cpu), OID_AUTO, nodename,
		    CTLFLAG_RD, NULL, "");
		if (pc->pc_md.sysctl_tree == NULL)
			continue;

		pcs = &pc->pc_md.stats;

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nasts", CTLFLAG_RD, &pcs->pcs_nasts,
		    "Number of IPI_AST interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nclks", CTLFLAG_RD, &pcs->pcs_nclks,
		    "Number of clock interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nextints", CTLFLAG_RD, &pcs->pcs_nextints,
		    "Number of ExtINT interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nhardclocks", CTLFLAG_RD, &pcs->pcs_nhardclocks,
		    "Number of IPI_HARDCLOCK interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps,
		    "Number of IPI_HIGH_FP interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nhwints", CTLFLAG_RD, &pcs->pcs_nhwints,
		    "Number of hardware (device) interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "npreempts", CTLFLAG_RD, &pcs->pcs_npreempts,
		    "Number of IPI_PREEMPT interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nrdvs", CTLFLAG_RD, &pcs->pcs_nrdvs,
		    "Number of IPI_RENDEZVOUS interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nstops", CTLFLAG_RD, &pcs->pcs_nstops,
		    "Number of IPI_STOP interrupts");

		SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
		    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
		    "nstrays", CTLFLAG_RD, &pcs->pcs_nstrays,
		    "Number of stray interrupts");
	}
}
SYSINIT(cpu_startup, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

void
cpu_flush_dcache(void *ptr, size_t len)
{
	vm_offset_t lim, va;

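	/*
	 * Note: this assumes a 32-byte flush-cache ("fc") line; round
	 * the start address down to a line boundary and step through
	 * the range one line at a time.
	 */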
	va = (uintptr_t)ptr & ~31;
	lim = (uintptr_t)ptr + len;
	while (va < lim) {
		ia64_fc(va);
		va += 32;
	}

	ia64_srlz_d();
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	*rate = (u_long)cpu_freq * 1000000ul;
	return (0);
}

void
cpu_halt(void)
{

	efi_reset_system();
}

void
cpu_idle(int busy)
{
	register_t ie;
	sbintime_t sbt = -1;

	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	ie = intr_disable();
	KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));

	if (sched_runnable())
		ia64_enable_intr();
	else if (cpu_idle_hook != NULL) {
		(*cpu_idle_hook)(sbt);
		/* The hook must enable interrupts! */
	} else {
		ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
		ia64_enable_intr();
	}

	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

void
cpu_reset(void)
{

	efi_reset_system();
}

void
cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
{
	struct pcb *oldpcb, *newpcb;

	oldpcb = old->td_pcb;
#ifdef COMPAT_FREEBSD32
	ia32_savectx(oldpcb);
#endif
	if (PCPU_GET(fpcurthread) == old)
		old->td_frame->tf_special.psr |= IA64_PSR_DFH;
	if (!savectx(oldpcb)) {
		newpcb = new->td_pcb;
		oldpcb->pcb_current_pmap =
		    pmap_switch(newpcb->pcb_current_pmap);

		atomic_store_rel_ptr(&old->td_lock, mtx);

#if defined(SCHED_ULE) && defined(SMP)
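		/*
		 * With ULE the new thread's lock may still be held as
		 * blocked_lock by the CPU that scheduled it; spin until
		 * the hand-off completes before running on its context.
		 */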
		while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
			cpu_spinwait();
#endif

		PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
		ia32_restorectx(newpcb);
#endif

		if (PCPU_GET(fpcurthread) == new)
			new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
		restorectx(newpcb);
		/* We should not get here. */
		panic("cpu_switch: restorectx() returned");
		/* NOTREACHED */
	}
}

void
cpu_throw(struct thread *old __unused, struct thread *new)
{
	struct pcb *newpcb;

	newpcb = new->td_pcb;
	(void)pmap_switch(newpcb->pcb_current_pmap);

#if defined(SCHED_ULE) && defined(SMP)
	while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
		cpu_spinwait();
#endif

	PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
	ia32_restorectx(newpcb);
#endif

	restorectx(newpcb);
	/* We should not get here. */
	panic("cpu_throw: restorectx() returned");
	/* NOTREACHED */
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	/*
	 * Set pc_acpi_id to "uninitialized".
	 * See sys/dev/acpica/acpi_cpu.c
	 */
	pcpu->pc_acpi_id = 0xffffffff;
}

void
cpu_pcpu_setup(struct pcpu *pc, u_int acpi_id, u_int sapic_id)
{

	pc->pc_acpi_id = acpi_id;
	pc->pc_md.lid = IA64_LID_SET_SAPIC_ID(sapic_id);
}

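/*
 * Spinlock sections nest: only the outermost spinlock_enter() disables
 * interrupts and records the previous state, and only the matching
 * outermost spinlock_exit() restores it.
 */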
void
spinlock_enter(void)
{
	struct thread *td;
	int intr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		intr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_intr = intr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	int intr;

	td = curthread;
	critical_exit();
	intr = td->td_md.md_saved_intr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(intr);
}

void
kdb_cpu_trap(int vector, int code __unused)
{

	__asm __volatile("flushrs;;");

	/* Restart after the break instruction. */
	if (vector == IA64_VEC_BREAK &&
	    kdb_frame->tf_special.ifa == IA64_FIXED_BREAK)
		kdb_frame->tf_special.psr += IA64_PSR_RI_1;
}

void
map_vhpt(uintptr_t vhpt)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RW;
	pte |= vhpt & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
	    "r"(pmap_vhpt_log2size << 2));

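	/*
	 * Insert the mapping as a permanent translation register while
	 * PSR.ic and PSR.i are clear, so that nothing can intervene
	 * while IFA/ITIR and dtr[3] are being written.
	 */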
	__asm __volatile("mov   %0=psr" : "=r"(psr));
	__asm __volatile("rsm   psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(vhpt);
	ia64_set_itir(pmap_vhpt_log2size << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
	__asm __volatile("mov   psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_pal_code(void)
{
	pt_entry_t pte;
	vm_offset_t va;
	vm_size_t sz;
	uint64_t psr;
	u_int shft;

	if (ia64_pal_size == 0)
		return;

	va = IA64_PHYS_TO_RR7(ia64_pal_base);

	sz = ia64_pal_size;
	shft = 0;
	while (sz > 1) {
		shft++;
		sz >>= 1;
	}
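	/*
	 * shft is now floor(log2(ia64_pal_size)). Firmware is expected
	 * to describe a power-of-two sized PAL region; shft << 2 is the
	 * page-size field format used for the purge and the ITIR below.
	 */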

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RWX;
	pte |= ia64_pal_base & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(va), "r"(shft<<2));

	__asm __volatile("mov	%0=psr" : "=r"(psr));
	__asm __volatile("rsm	psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(va);
	ia64_set_itir(shft << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(4), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(1), "r"(pte));
	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
	ia64_srlz_i();
}

void
map_gateway_page(void)
{
	pt_entry_t pte;
	uint64_t psr;

	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_X_RX;
	pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK;

	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2));

	__asm __volatile("mov	%0=psr" : "=r"(psr));
	__asm __volatile("rsm	psr.ic|psr.i");
	ia64_srlz_i();
	ia64_set_ifa(VM_MAXUSER_ADDRESS);
	ia64_set_itir(PAGE_SHIFT << 2);
	ia64_srlz_d();
	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(5), "r"(pte));
	ia64_srlz_d();
	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(2), "r"(pte));
	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
	ia64_srlz_i();

	/* Expose the mapping to userland in ar.k5 */
	ia64_set_k5(VM_MAXUSER_ADDRESS);
}

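/*
 * Apply a PAL frequency ratio (numerator in the upper 32 bits,
 * denominator in the lower 32 bits) to a base frequency in Hz and
 * return the result in MHz, rounded to the nearest MHz. For example,
 * a 200000000 Hz base clock with a 13/2 processor ratio yields 1300.
 */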
static u_int
freq_ratio(u_long base, u_long ratio)
{
	u_long f;

	f = (base * (ratio >> 32)) / (ratio & 0xfffffffful);
	return ((f + 500000) / 1000000);
}

static void
calculate_frequencies(void)
{
	struct ia64_sal_result sal;
	struct ia64_pal_result pal;
	register_t ie;

	ie = intr_disable();
	sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
	pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
	intr_restore(ie);

	if (sal.sal_status == 0 && pal.pal_status == 0) {
		if (bootverbose) {
			printf("Platform clock frequency %ld Hz\n",
			       sal.sal_result[0]);
			printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
			       "ITC ratio %ld/%ld\n",
			       pal.pal_result[0] >> 32,
			       pal.pal_result[0] & ((1L << 32) - 1),
			       pal.pal_result[1] >> 32,
			       pal.pal_result[1] & ((1L << 32) - 1),
			       pal.pal_result[2] >> 32,
			       pal.pal_result[2] & ((1L << 32) - 1));
		}
		cpu_freq = freq_ratio(sal.sal_result[0], pal.pal_result[0]);
		bus_freq = freq_ratio(sal.sal_result[0], pal.pal_result[1]);
		itc_freq = freq_ratio(sal.sal_result[0], pal.pal_result[2]);
	}
}

struct ia64_init_return
ia64_init(void)
{
	struct ia64_init_return ret;
	struct efi_md *md;
	pt_entry_t *pbvm_pgtbl_ent, *pbvm_pgtbl_lim;
	char *p;
	vm_size_t mdlen;
	int metadata_missing;

	/*
	 * NO OUTPUT ALLOWED UNTIL FURTHER NOTICE.
	 */

	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WB. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
	ia64_srlz_d();

	/* Initialize/setup physical memory datastructures */
	ia64_physmem_init();

	/*
	 * Process the memory map. This gives us the PAL locations,
	 * the I/O port base address, and the available memory regions
	 * for initializing the physical memory map.
	 */
	for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
		mdlen = md->md_pages * EFI_PAGE_SIZE;
		switch (md->md_type) {
		case EFI_MD_TYPE_IOPORT:
			ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys,
			    mdlen);
			break;
		case EFI_MD_TYPE_PALCODE:
			ia64_pal_base = md->md_phys;
			ia64_pal_size = mdlen;
			/*FALLTHROUGH*/
		case EFI_MD_TYPE_BAD:
		case EFI_MD_TYPE_FIRMWARE:
		case EFI_MD_TYPE_RECLAIM:
		case EFI_MD_TYPE_RT_CODE:
		case EFI_MD_TYPE_RT_DATA:
			/* Don't use these memory regions. */
			ia64_physmem_track(md->md_phys, mdlen);
			break;
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_FREE:
			/* These are ok to use. */
			ia64_physmem_add(md->md_phys, mdlen);
			break;
		}
	}

	/*
	 * Remove the PBVM and its page table from phys_avail. The loader
	 * passes the physical address of the page table to us. The
	 * virtual address of the page table is fixed. Track the PBVM
	 * limit for later use.
	 */
	ia64_physmem_delete(bootinfo->bi_pbvm_pgtbl, bootinfo->bi_pbvm_pgtblsz);
	pbvm_pgtbl_ent = (void *)IA64_PBVM_PGTBL;
	pbvm_pgtbl_lim = (void *)(IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz);
	while (pbvm_pgtbl_ent < pbvm_pgtbl_lim) {
		if ((*pbvm_pgtbl_ent & PTE_PRESENT) == 0)
			break;
		ia64_physmem_delete(*pbvm_pgtbl_ent & PTE_PPN_MASK,
		    IA64_PBVM_PAGE_SIZE);
		pbvm_pgtbl_ent++;
	}

	/* Finalize physical memory datastructures */
	ia64_physmem_fini();

	metadata_missing = 0;
	if (bootinfo->bi_modulep)
		preload_metadata = (caddr_t)bootinfo->bi_modulep;
	else
		metadata_missing = 1;

	if (envmode == 0 && bootinfo->bi_envp)
		kern_envp = (caddr_t)bootinfo->bi_envp;
	else
		kern_envp = static_env;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = bootinfo->bi_boothowto;

	if (boothowto & RB_VERBOSE)
		bootverbose = 1;

	/*
	 * Wire things up so we can call the firmware.
	 */
	map_pal_code();
	efi_boot_minimal(bootinfo->bi_systab);
	ia64_xiv_init();
	ia64_sal_init();
	calculate_frequencies();

	set_cputicker(ia64_get_itc, (u_long)itc_freq * 1000000, 0);

	/*
	 * Setup the PCPU data for the bootstrap processor. It is needed
	 * by printf(). Also, since printf() has critical sections, we
	 * need to initialize at least pc_curthread.
	 */
	pcpup = &pcpu0;
	ia64_set_k4((u_int64_t)pcpup);
	pcpu_init(pcpup, 0, sizeof(pcpu0));
	dpcpu_init(ia64_physmem_alloc(DPCPU_SIZE, PAGE_SIZE), 0);
	cpu_pcpu_setup(pcpup, ~0U, ia64_get_lid());
	PCPU_SET(curthread, &thread0);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/* OUTPUT NOW ALLOWED */

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	/* Get FPSWA interface */
	fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL :
	    (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa);

	/* Init basic tunables, including hz */
	init_param1();

	p = getenv("kernelname");
	if (p != NULL) {
		strlcpy(kernelname, p, sizeof(kernelname));
		freeenv(p);
	}

	init_param2(physmem);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbufp = ia64_physmem_alloc(msgbufsize, PAGE_SIZE);
	msgbufinit(msgbufp, msgbufsize);

	proc_linkup0(&proc0, &thread0);
	/*
	 * Init mapping for kernel stack for proc 0
	 */
	p = ia64_physmem_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	thread0.td_kstack = (uintptr_t)p;
	thread0.td_kstack_pages = KSTACK_PAGES;

	mutex_init();

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 */
	cpu_thread_alloc(&thread0);
	thread0.td_frame->tf_flags = FRAME_SYSCALL;
	thread0.td_pcb->pcb_special.sp =
	    (u_int64_t)thread0.td_frame - 16;
	thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#ifdef DDB
	ksym_start = bootinfo->bi_symtab;
	ksym_end = bootinfo->bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger\n");
#endif

	ia64_set_tpr(0);
	ia64_srlz_d();

	ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
	ret.sp = thread0.td_pcb->pcb_special.sp;
	return (ret);
}

uint64_t
ia64_get_hcdp(void)
{

	return (bootinfo->bi_hcdp);
}

void
bzero(void *buf, size_t len)
{
	caddr_t p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}
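	/*
	 * Bulk phase: with the pointer now word-aligned, clear eight
	 * words per iteration; the loops below mop up any word-sized
	 * and byte-sized tail.
	 */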
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}
	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}
	while (len) {
		*p++ = 0;
		len--;
	}
}

u_int
ia64_itc_freq(void)
{

	return (itc_freq);
}

void
DELAY(int n)
{
	u_int64_t start, end, now;

	sched_pin();

	start = ia64_get_itc();
	end = start + itc_freq * n;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
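	/*
	 * itc_freq is in MHz, so n microseconds correspond to
	 * itc_freq * n ITC ticks. The second disjunct keeps us
	 * spinning when "end" has wrapped around 2^64 but "now"
	 * has not yet done so.
	 */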
	do {
		now = ia64_get_itc();
	} while (now < end || (now > start && end < start));

	sched_unpin();
}

/*
 * Send an interrupt (signal) to a process.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *tf;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	u_int64_t sbs, sp;
	int oonstack;
	int sig;
	u_long code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_special.sp;
	oonstack = sigonstack(sp);
	sbs = 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sbs = (u_int64_t)td->td_sigstk.ss_sp;
		sbs = (sbs + 15) & ~15;
		sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)sp;
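	/* Carve out space for the frame and align it to 16 bytes. */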
	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

	/* Fill in the siginfo structure for POSIX handlers. */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		/*
		 * XXX this shouldn't be here after code in trap.c
		 * is fixed
		 */
		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
		code = (u_int64_t)&sfp->sf_si;
	}

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);

	/* Copy the frame out to userland. */
	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		return;
	}

	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
	} else
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

	/*
	 * Setup the trapframe to return to the signal trampoline. We pass
	 * information to the trampoline in the following registers:
	 *
	 *	gp	new backing store or NULL
	 *	r8	signal number
	 *	r9	signal code or siginfo pointer
	 *	r10	signal handler (function descriptor)
	 */
	tf->tf_special.sp = (u_int64_t)sfp - 16;
	tf->tf_special.gp = sbs;
	tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
	tf->tf_special.ndirty = 0;
	tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
	tf->tf_scratch.gr8 = sig;
	tf->tf_scratch.gr9 = code;
	tf->tf_scratch.gr10 = (u_int64_t)catcher;

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to clean up state after a signal has been taken. Reset
 * signal mask and stack state from the context left by sendsig (above).
 * Return to the previous pc and psl as specified by the context left
 * by sendsig. Check carefully to make sure that the user has not
 * modified the state to gain improper privileges.
 *
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc;
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
		return (EFAULT);

	set_mcontext(td, &uc.uc_mcontext);

#if defined(COMPAT_43)
	if (sigonstack(tf->tf_special.sp))
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_special = tf->tf_special;
	pcb->pcb_special.__spare = ~0UL;	/* XXX see unwind.c */
	save_callee_saved(&pcb->pcb_preserved);
	save_callee_saved_fp(&pcb->pcb_preserved_fp);
}

int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
	struct iovec iov;
	struct uio uio;
	uint64_t bspst, kstk, rnat;
	int error, locked;

	if (r->ndirty == 0)
		return (0);

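	/*
	 * Mirror the low 9 bits of the user bspstore on the kernel
	 * stack so that RNaT collection slots (addresses ending in
	 * 0x1f8) line up between the two backing stores.
	 */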
	kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
	if (td == curthread) {
		__asm __volatile("mov	ar.rsc=0;;");
		__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		/* Make sure we have all the user registers written out. */
		if (bspst - kstk < r->ndirty) {
			__asm __volatile("flushrs;;");
			__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		}
		__asm __volatile("mov	%0=ar.rnat;;" : "=r"(rnat));
		__asm __volatile("mov	ar.rsc=3");
		error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
		kstk += r->ndirty;
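		/*
		 * If the RSE wrote past an RNaT collection slot while
		 * flushing, take the NaT bits from that slot on the
		 * kernel stack rather than from ar.rnat.
		 */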
		r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
		    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
	} else {
		locked = PROC_LOCKED(td->td_proc);
		if (!locked)
			PHOLD(td->td_proc);
		iov.iov_base = (void*)(uintptr_t)kstk;
		iov.iov_len = r->ndirty;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = r->bspstore;
		uio.uio_resid = r->ndirty;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		uio.uio_td = td;
		error = proc_rwmem(td->td_proc, &uio);
		/*
		 * XXX proc_rwmem() doesn't currently return ENOSPC,
		 * so I think it can bogusly return 0. Neither do
		 * we allow short writes.
		 */
		if (uio.uio_resid != 0 && error == 0)
			error = ENOSPC;
		if (!locked)
			PRELE(td->td_proc);
	}

	r->bspstore += r->ndirty;
	r->ndirty = 0;
	return (error);
}

int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	bzero(mc, sizeof(*mc));
	mc->mc_special = tf->tf_special;
	error = ia64_flush_dirty(td, &mc->mc_special);
	if (tf->tf_flags & FRAME_SYSCALL) {
		mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		if (flags & GET_MC_CLEAR_RET) {
			mc->mc_scratch.gr8 = 0;
			mc->mc_scratch.gr9 = 0;
			mc->mc_scratch.gr10 = 0;
			mc->mc_scratch.gr11 = 0;
		}
	} else {
		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/*
		 * XXX If the thread never used the high FP registers, we
		 * probably shouldn't waste time saving them.
		 */
		ia64_highfp_save(td);
		mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
		mc->mc_high_fp = td->td_pcb->pcb_high_fp;
	}
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);
	return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct _special s;
	struct trapframe *tf;
	uint64_t psrmask;

	tf = td->td_frame;

	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	s = mc->mc_special;
	/*
	 * Only copy the user mask and the restart instruction bit from
	 * the new context.
	 */
	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
	    IA64_PSR_MFH | IA64_PSR_RI;
	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
	/* We don't have any dirty registers of the new context. */
	s.ndirty = 0;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/*
		 * We can get an async context passed to us while we
		 * entered the kernel through a syscall: sigreturn(2)
		 * takes contexts that could previously be the result of
		 * a trap or interrupt.
		 * Hence, we cannot assert that the trapframe is not
		 * a syscall frame, but we can assert that it's at
		 * least an expected syscall.
		 */
		if (tf->tf_flags & FRAME_SYSCALL) {
			KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn,
			    ("async context from unexpected syscall"));
			tf->tf_flags &= ~FRAME_SYSCALL;
		}
		tf->tf_scratch = mc->mc_scratch;
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
			td->td_pcb->pcb_high_fp = mc->mc_high_fp;
	} else {
		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0,
		    ("sync context on a non-syscall frame"));
		if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
			s.cfm = tf->tf_special.cfm;
			s.iip = tf->tf_special.iip;
			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
		} else
			tf->tf_scratch = mc->mc_scratch;
	}
	tf->tf_special = s;
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	return (0);
}

/*
 * Clear registers on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	uint64_t *ksttop, *kst;

	tf = td->td_frame;
	ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
	    (tf->tf_special.bspstore & 0x1ffUL));

	/*
	 * We can ignore up to 8KB of dirty registers by masking off the
	 * lower 13 bits in exception_restore() or epc_syscall(). This
	 * should be enough for a couple of years, but if there are more
	 * than 8KB of dirty registers, we lose track of the bottom of
	 * the kernel stack. The solution is to copy the active part of
	 * the kernel stack down 1 page (or 2, but not more than that)
	 * so that we always have less than 8KB of dirty registers.
	 */
	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE;
		/*
		 * Copy the arguments onto the kernel register stack so that
		 * they get loaded by the loadrs instruction. Skip over the
		 * NaT collection points.
		 */
		kst = ksttop - 1;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = 0;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst-- = imgp->ps_strings;
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;
		*kst = stack;
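		/*
		 * (ksttop - kst) is the number of 8-byte slots just
		 * written, including NaT collection slots; shifting
		 * by 3 converts that to the byte count for ndirty.
		 */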
		tf->tf_special.ndirty = (ksttop - kst) << 3;
	} else {				/* epc syscalls (default). */
		tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
		tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
		/*
		 * Write values for out0, out1 and out2 to the user's backing
		 * store and arrange for them to be restored into the user's
		 * initial register frame.
		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
		 */
		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
		suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings);
		suword((caddr_t)tf->tf_special.bspstore -  8, 0);
	}

	tf->tf_special.iip = imgp->entry_addr;
	tf->tf_special.sp = (stack & ~15) - 16;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	uint64_t slot;

	switch (addr & 0xFUL) {
	case 0:
		slot = IA64_PSR_RI_0;
		break;
	case 1:
		/* XXX we need to deal with MLX bundles here */
		slot = IA64_PSR_RI_1;
		break;
	case 2:
		slot = IA64_PSR_RI_2;
		break;
	default:
		return (EINVAL);
	}

	td->td_frame->tf_special.iip = addr & ~0x0FULL;
	td->td_frame->tf_special.psr =
	    (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * There's no way to set single stepping when we're leaving the
	 * kernel through the EPC syscall path. The way we solve this is
	 * by enabling the lower-privilege trap so that we re-enter the
	 * kernel as soon as the privilege level changes. See trap.c for
	 * how we proceed from there.
	 */
	tf = td->td_frame;
	if (tf->tf_flags & FRAME_SYSCALL)
		tf->tf_special.psr |= IA64_PSR_LP;
	else
		tf->tf_special.psr |= IA64_PSR_SS;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	/*
	 * Clear any and all status bits we may use to implement single
	 * stepping.
	 */
	tf = td->td_frame;
	tf->tf_special.psr &= ~IA64_PSR_SS;
	tf->tf_special.psr &= ~IA64_PSR_LP;
	tf->tf_special.psr &= ~IA64_PSR_TB;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	regs->r_special = tf->tf_special;
	regs->r_scratch = tf->tf_scratch;
	save_callee_saved(&regs->r_preserved);
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	error = ia64_flush_dirty(td, &tf->tf_special);
	if (!error) {
		tf->tf_special = regs->r_special;
		tf->tf_special.bspstore += tf->tf_special.ndirty;
		tf->tf_special.ndirty = 0;
		tf->tf_scratch = regs->r_scratch;
		restore_callee_saved(&regs->r_preserved);
	}
	return (error);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Save the high FP registers. */
	ia64_highfp_save(td);

	fpregs->fpr_scratch = frame->tf_scratch_fp;
	save_callee_saved_fp(&fpregs->fpr_preserved);
	fpregs->fpr_high = pcb->pcb_high_fp;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *frame = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Throw away the high FP registers (should be redundant). */
	ia64_highfp_drop(td);

	frame->tf_scratch_fp = fpregs->fpr_scratch;
	restore_callee_saved_fp(&fpregs->fpr_preserved);
	pcb->pcb_high_fp = fpregs->fpr_high;
	return (0);
}

void
ia64_sync_icache(vm_offset_t va, vm_offset_t sz)
{
	vm_offset_t lim;

	if (!ia64_sync_icache_needed)
		return;

	lim = va + sz;
	while (va < lim) {
		ia64_fc_i(va);
		va += 32;	/* XXX */
	}

	ia64_sync_i();
	ia64_srlz_i();
}
1535