/*-
 * Initial implementation:
 * Copyright (c) 2001 Robert Drehmel
 * All rights reserved.
 *
 * As long as the above copyright statement and this notice remain
 * unchanged, you can do what ever you want with this file.
 */
/*-
 * Copyright (c) 2008 - 2012 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * FreeBSD/sparc64 kernel loader - machine dependent part
 *
 *  - implements copyin and readin functions that map kernel
 *    pages on demand.  The machine independent code does not
 *    know the size of the kernel early enough to pre-enter
 *    TTEs, and installing just one 4MB mapping seemed too
 *    limiting to me.
 */
47
48#include <stand.h>
49#include <sys/param.h>
50#include <sys/exec.h>
51#include <sys/linker.h>
52#include <sys/queue.h>
53#include <sys/types.h>
54#ifdef LOADER_ZFS_SUPPORT
55#include <sys/vtoc.h>
56#include "../zfs/libzfs.h"
57#endif
58
59#include <vm/vm.h>
60#include <machine/asi.h>
61#include <machine/cmt.h>
62#include <machine/cpufunc.h>
63#include <machine/elf.h>
64#include <machine/fireplane.h>
65#include <machine/jbus.h>
66#include <machine/lsu.h>
67#include <machine/metadata.h>
68#include <machine/tte.h>
69#include <machine/tlb.h>
70#include <machine/upa.h>
71#include <machine/ver.h>
72#include <machine/vmparam.h>
73
74#include "bootstrap.h"
75#include "libofw.h"
76#include "dev_net.h"
77
78extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
79
80enum {
81	HEAPVA		= 0x800000,
82	HEAPSZ		= 0x1000000,
83	LOADSZ		= 0x1000000	/* for kernel and modules */
84};
85
/* At least Sun Fire V1280 requires page-sized allocations to be claimed. */
CTASSERT(HEAPSZ % PAGE_SIZE == 0);

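/*
 * MMU setup and map-in hooks; sun4u is the only implementation provided
 * by this loader.
 */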
static struct mmu_ops {
	void (*tlb_init)(void);
	int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;

typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
    void *openfirmware);

static inline u_long dtlb_get_data_sun4u(u_int, u_int);
static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static inline u_long itlb_get_data_sun4u(u_int, u_int);
static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_relocate_locked0_sun4u(void);
extern vm_offset_t md_load(char *, vm_offset_t *);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
const char *cpu_cpuid_prop_sun4u(void);
uint32_t cpu_get_mid_sun4u(void);
static void tlb_init_sun4u(void);

#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;

static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif

static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };

/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
u_int dtlb_slot;
u_int itlb_slot;
static int cpu_impl;
static u_int dtlb_slot_max;
static u_int itlb_slot_max;
static u_int tlb_locked;

static vm_offset_t curkva = 0;
static vm_offset_t heapva;

static char bootpath[64];
static phandle_t root;

#ifdef LOADER_ZFS_SUPPORT
static struct zfs_devdesc zfs_currdev;
#endif

/*
 * Machine dependent structures that the machine independent
 * loader part uses.
 */
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
	&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
	&netdev,
#endif
#ifdef LOADER_ZFS_SUPPORT
	&zfs_dev,
#endif
	NULL
};

struct arch_switch archsw;

static struct file_format sparc64_elf = {
	__elfN(loadfile),
	__elfN(exec)
};

struct file_format *file_formats[] = {
	&sparc64_elf,
	NULL
};

struct fs_ops *file_system[] = {
#ifdef LOADER_ZFS_SUPPORT
	&zfs_fsops,
#endif
#ifdef LOADER_UFS_SUPPORT
	&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
	&cd9660_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
	&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
	&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
	&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
	&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
	&tftp_fsops,
#endif
	NULL
};

struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
	&ofwnet,
#endif
	NULL
};

extern struct console ofwconsole;
struct console *consoles[] = {
	&ofwconsole,
	NULL
};

#ifdef LOADER_DEBUG
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_phys_set(vm_offset_t pa, int sz)
{
	u_long off;

	off = (u_long)pa & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}

static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif

/*
 * archsw functions
 */
static int
sparc64_autoload(void)
{

	return (0);
}

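/*
 * Both of the functions below map in the destination range on demand
 * before the MI code reads or copies the kernel into it.
 */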
static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{

	mmu_ops->mmu_mapin(va, len);
	return (read(fd, (void *)va, len));
}

static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{

	mmu_ops->mmu_mapin(dest, len);
	memcpy((void *)dest, src, len);
	return (len);
}

/*
 * other MD functions
 */
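/*
 * The helpers below wrap the "claim", "map" and "release" methods of the
 * OFW mmu and memory ihandles, which take and return physical addresses
 * split into 32-bit halves.
 */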
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
	vm_offset_t mva;

	if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
		return ((vm_offset_t)-1);
	return (mva);
}

static vm_offset_t
alloc_phys(size_t size, int align)
{
	cell_t phys_hi, phys_low;

	if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
	    &phys_hi) == -1)
		return ((vm_offset_t)-1);
	return ((vm_offset_t)phys_hi << 32 | phys_low);
}

static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{

	return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), virt, size, mode));
}

static void
release_phys(vm_offset_t phys, u_int size)
{

	(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), size);
}

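/*
 * Start the loaded kernel: build the metadata via md_load(), release the
 * loader heap and jump to the ELF entry point, passing the metadata
 * pointer and the Open Firmware entry.
 */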
static int
__elfN(exec)(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp;
	Elf_Addr entry;
	Elf_Ehdr *e;
	int error;

	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
		return (EFTYPE);
	e = (Elf_Ehdr *)&fmp->md_data;

	if ((error = md_load(fp->f_args, &mdp)) != 0)
		return (error);

	printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
	pmap_print_tlb_sun4u();
#endif

	dev_cleanup();

	entry = e->e_entry;

	OF_release((void *)heapva, HEAPSZ);

	((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);

	panic("%s: exec returned", __func__);
}

static inline u_long
dtlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static inline u_long
itlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

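/*
 * Look up the physical address va is mapped to by scanning the locked
 * dTLB entries; returns -1 if no mapping is found.
 */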
static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	u_int i, tlb;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < dtlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	int i;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < itlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static int
dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
	    virt));
}

static int
itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
	    (data & TD_L) != 0)
		panic("%s: won't enter locked TLB entry at index 0 on USIII+",
		    __func__);
	return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
	    virt));
}

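/*
 * Move a valid locked entry out of iTLB slot 0 into a free slot as part
 * of the workaround for Cheetah+ erratum 34; only needed on USIII+.
 */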
static void
itlb_relocate_locked0_sun4u(void)
{
	u_long data, pstate, tag;
	int i;

	if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
		return;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);

	data = itlb_get_data_sun4u(tlb_locked, 0);
	if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
		wrpr(pstate, pstate, 0);
		return;
	}

	/* Flush the mapping of slot 0. */
	tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
	stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
	    TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
	flush(0);	/* The USIII-family ignores the address. */

	/*
	 * Search for a free replacement slot != 0 and enter the data and
	 * tag that formerly were in slot 0 into it.
	 */
	for (i = 1; i < itlb_slot_max; i++) {
		if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
			continue;

		stxa(AA_IMMU_TAR, ASI_IMMU, tag);
		stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
		    data);
		flush(0);	/* The USIII-family ignores the address. */
		break;
	}
	wrpr(pstate, pstate, 0);
	if (i == itlb_slot_max)
		panic("%s: could not find a replacement slot", __func__);
}

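/*
 * Back the range [va, va + len) with locked 4MB mappings in both the
 * dTLB and the iTLB, allocating physical memory and claiming the
 * virtual pages from OFW as needed.
 */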
static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;
	u_long data;
	u_int index;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
		    itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
			/* Allocate a physical page, claim the virtual area. */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
				/*
				 * The mappings may have changed, be paranoid.
				 */
				continue;
			}
			/*
			 * In fact, up to two fewer slots may be usable,
			 * depending on the kernel TSB size.
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("%s: out of dtlb_slots", __func__);
			if (itlb_slot >= itlb_slot_max)
				panic("%s: out of itlb_slots", __func__);
			data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
			    TD_CV | TD_P | TD_W;
			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			index = dtlb_slot_max - dtlb_slot - 1;
			if (dtlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter dTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			dtlb_slot++;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			index = itlb_slot_max - itlb_slot - 1;
			if (itlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter iTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			itlb_slot++;
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static vm_offset_t
init_heap(void)
{

	/* There is no need for contiguous physical heap memory. */
	heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
	return (heapva);
}

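/*
 * Recursively search the OFW device tree for the cpu node whose ID
 * property matches bspid (the boot processor).
 */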
static phandle_t
find_bsp_sun4u(phandle_t node, uint32_t bspid)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t cpuid;

	for (; node > 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			child = find_bsp_sun4u(child, bspid);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
			    sizeof(cpuid)) <= 0)
				continue;
			if (cpuid == bspid)
				return (node);
		}
	}
	return (0);
}

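/*
 * Name of the OFW property holding the ID of a cpu node; this differs
 * between CPU implementations.
 */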
const char *
cpu_cpuid_prop_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

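/*
 * Read the module/agent ID of the CPU we are running on from the
 * implementation-specific configuration register.
 */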
uint32_t
cpu_get_mid_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

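/*
 * Determine the CPU implementation, the TLB to use for locked entries
 * and its number of slots, and allocate the bookkeeping for the
 * mappings entered by mmu_mapin_sun4u().
 */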
static void
tlb_init_sun4u(void)
{
	phandle_t bsp;

	cpu_impl = VER_IMPL(rdpr(ver));
	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		tlb_locked = TLB_DAR_T32;
		break;
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		tlb_locked = TLB_DAR_T16;
		break;
	case CPU_IMPL_SPARC64V:
		tlb_locked = TLB_DAR_FTLB;
		break;
	}
	bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
	if (bsp == 0)
		panic("%s: no node for bootcpu?!?!", __func__);

	if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("%s: can't get TLB slot max.", __func__);

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
#ifdef LOADER_DEBUG
		printf("pre fixup:\n");
		pmap_print_tlb_sun4u();
#endif

		/*
		 * Relocate the locked entry in iTLB t16 slot 0 (if any)
		 * as part of the workaround for Cheetah+ erratum 34.
		 */
		itlb_relocate_locked0_sun4u();

#ifdef LOADER_DEBUG
		printf("post fixup:\n");
		pmap_print_tlb_sun4u();
#endif
	}

	dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL)
		panic("%s: can't allocate TLB store", __func__);
}

#ifdef LOADER_ZFS_SUPPORT
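/*
 * Probe the boot device and all disks listed in the boot-device OFW
 * variable for ZFS pools, remembering the pool GUID of the boot device
 * so it can be used as the current device.
 */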
static void
sparc64_zfs_probe(void)
{
	struct vtoc8 vtoc;
	char alias[64], devname[sizeof(alias) + sizeof(":x") - 1];
	char type[sizeof("device_type")];
	char *bdev, *dev, *odev;
	uint64_t guid;
	int fd, len, part;
	phandle_t aliases, options;

	/* Get the GUID of the ZFS pool on the boot device. */
	guid = 0;
	zfs_probe_dev(bootpath, &guid);

	/*
	 * Get the GUIDs of the ZFS pools on any additional disks listed in
	 * the boot-device environment variable.
	 */
	if ((aliases = OF_finddevice("/aliases")) == -1)
		goto out;
	options = OF_finddevice("/options");
	len = OF_getproplen(options, "boot-device");
	if (len <= 0)
		goto out;
	bdev = odev = malloc(len + 1);
	if (bdev == NULL)
		goto out;
	if (OF_getprop(options, "boot-device", bdev, len) <= 0)
		goto out;
	bdev[len] = '\0';
	while ((dev = strsep(&bdev, " ")) != NULL) {
		if (*dev == '\0')
			continue;
		strcpy(alias, dev);
		(void)OF_getprop(aliases, dev, alias, sizeof(alias));
		/*
		 * Don't probe the boot disk twice.  Note that bootpath
		 * includes the partition specifier.
		 */
		if (strncmp(alias, bootpath, strlen(alias)) == 0)
			continue;
		if (OF_getprop(OF_finddevice(alias), "device_type", type,
		    sizeof(type)) == -1)
			continue;
		if (strcmp(type, "block") != 0)
			continue;

		/* Find freebsd-zfs slices in the VTOC. */
		fd = open(alias, O_RDONLY);
		if (fd == -1)
			continue;
		lseek(fd, 0, SEEK_SET);
		if (read(fd, &vtoc, sizeof(vtoc)) != sizeof(vtoc)) {
			close(fd);
			continue;
		}
		close(fd);

		for (part = 0; part < 8; part++) {
			if (part == 2 || vtoc.part[part].tag !=
			    VTOC_TAG_FREEBSD_ZFS)
				continue;
			(void)sprintf(devname, "%s:%c", alias, part + 'a');
			if (zfs_probe_dev(devname, NULL) == ENXIO)
				break;
		}
	}
	free(odev);

 out:
	if (guid != 0) {
		zfs_currdev.pool_guid = guid;
		zfs_currdev.root_guid = 0;
		zfs_currdev.d_dev = &zfs_dev;
		zfs_currdev.d_type = zfs_currdev.d_dev->dv_type;
	}
}
#endif /* LOADER_ZFS_SUPPORT */

int
main(int (*openfirm)(void *))
{
	char compatible[32];
	struct devsw **dp;

	/*
	 * Tell the Open Firmware functions where to find the OFW gate.
	 */
	OF_init(openfirm);

	archsw.arch_getdev = ofw_getdev;
	archsw.arch_copyin = sparc64_copyin;
	archsw.arch_copyout = ofw_copyout;
	archsw.arch_readin = sparc64_readin;
	archsw.arch_autoload = sparc64_autoload;
#ifdef LOADER_ZFS_SUPPORT
	archsw.arch_zfs_probe = sparc64_zfs_probe;
#endif

	if (init_heap() == (vm_offset_t)-1)
		OF_exit();
	setheap((void *)heapva, (void *)(heapva + HEAPSZ));

	/*
	 * Probe for a console.
	 */
	cons_probe();

	if ((root = OF_peer(0)) == -1)
		panic("%s: can't get root phandle", __func__);
	OF_getprop(root, "compatible", compatible, sizeof(compatible));
	mmu_ops = &mmu_ops_sun4u;

	mmu_ops->tlb_init();

	/*
	 * Set up the current device.
	 */
	OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));

	/*
	 * Initialize devices.
	 */
	for (dp = devsw; *dp != 0; dp++)
		if ((*dp)->dv_init != 0)
			(*dp)->dv_init();

#ifdef LOADER_ZFS_SUPPORT
	if (zfs_currdev.pool_guid != 0) {
		(void)strncpy(bootpath, zfs_fmtdev(&zfs_currdev),
		    sizeof(bootpath) - 1);
		bootpath[sizeof(bootpath) - 1] = '\0';
	} else
#endif

	/*
	 * Sun compatible bootable CD-ROMs have a disk label placed before
	 * the ISO 9660 data, with the actual file system being in the first
	 * partition, while the other partitions contain pseudo disk labels
	 * with embedded boot blocks for different architectures, which may
	 * be followed by UFS file systems.
	 * The firmware will set the boot path to the partition it boots from
	 * ('f' in the sun4u/sun4v case), but we want the kernel to be loaded
	 * from the ISO 9660 file system ('a'), so the boot path needs to be
	 * altered.
	 */
	if (bootpath[strlen(bootpath) - 2] == ':' &&
	    bootpath[strlen(bootpath) - 1] == 'f')
		bootpath[strlen(bootpath) - 1] = 'a';

	env_setenv("currdev", EV_VOLATILE, bootpath,
	    ofw_setcurrdev, env_nounset);
	env_setenv("loaddev", EV_VOLATILE, bootpath,
	    env_noset, env_nounset);

	printf("\n");
	printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
	printf("(%s, %s)\n", bootprog_maker, bootprog_date);
	printf("bootpath=\"%s\"\n", bootpath);

	/* Give control to the machine independent loader code. */
	interact();
	return (1);
}

COMMAND_SET(heap, "heap", "show heap usage", command_heap);

static int
command_heap(int argc, char *argv[])
{

	mallocstats();
	printf("heap base at %p, top at %p, upper limit at %p\n",
	    (void *)heapva, sbrk(0), (void *)(heapva + HEAPSZ));
	return (CMD_OK);
}

COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);

static int
command_reboot(int argc, char *argv[])
{
	int i;

	for (i = 0; devsw[i] != NULL; ++i)
		if (devsw[i]->dv_cleanup != NULL)
			(devsw[i]->dv_cleanup)();

	printf("Rebooting...\n");
	OF_exit();
}

/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{

	OF_exit();
}

#ifdef LOADER_DEBUG
static const char *const page_sizes[] = {
	"  8k", " 64k", "512k", "  4m"
};

static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{

	printf("%s %s ",
	    page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
	    tag & TD_G ? "G" : " ");
	printf(tte & TD_W ? "W " : "  ");
	printf(tte & TD_P ? "\e[33mP\e[0m " : "  ");
	printf(tte & TD_E ? "E " : "  ");
	printf(tte & TD_CV ? "CV " : "   ");
	printf(tte & TD_CP ? "CP " : "   ");
	printf(tte & TD_L ? "\e[32mL\e[0m " : "  ");
	printf(tte & TD_IE ? "IE " : "   ");
	printf(tte & TD_NFO ? "NFO " : "    ");
	printf("pa=0x%lx va=0x%lx ctx=%ld\n",
	    TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
}

static void
pmap_print_tlb_sun4u(void)
{
	tte_t tag, tte;
	u_long pstate;
	int i;

	pstate = rdpr(pstate);
	for (i = 0; i < itlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		printf("iTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
	for (i = 0; i < dtlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		printf("dTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
}
#endif