/*-
 * Initial implementation:
 * Copyright (c) 2001 Robert Drehmel
 * All rights reserved.
 *
 * As long as the above copyright statement and this notice remain
 * unchanged, you can do what ever you want with this file.
 */
/*-
 * Copyright (c) 2008 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/boot/sparc64/loader/main.c 235364 2012-05-12 20:27:33Z avg $");

/*
 * FreeBSD/sparc64 kernel loader - machine dependent part
 *
 *  - implements copyin and readin functions that map kernel
 *    pages on demand.  The machine independent code does not
 *    know the size of the kernel early enough to pre-enter
 *    TTEs, and installing just one 4MB mapping seemed too
 *    limiting to me.
 */

#include <stand.h>
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/types.h>
#ifdef LOADER_ZFS_SUPPORT
#include <sys/vtoc.h>
#include "../zfs/libzfs.h"
#endif

#include <vm/vm.h>
#include <machine/asi.h>
#include <machine/cmt.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/fireplane.h>
#include <machine/jbus.h>
#include <machine/lsu.h>
#include <machine/metadata.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/upa.h>
#include <machine/ver.h>
#include <machine/vmparam.h>

#include "bootstrap.h"
#include "libofw.h"
#include "dev_net.h"

#define	MAXDEV	31

extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];

enum {
	HEAPVA		= 0x800000,
	HEAPSZ		= 0x1000000,
	LOADSZ		= 0x1000000	/* for kernel and modules */
};

/* At least Sun Fire V1280 requires page-sized allocations to be claimed. */
CTASSERT(HEAPSZ % PAGE_SIZE == 0);

static struct mmu_ops {
	void (*tlb_init)(void);
	int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;

typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
    void *openfirmware);

static inline u_long dtlb_get_data_sun4u(u_int, u_int);
static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static inline u_long itlb_get_data_sun4u(u_int, u_int);
static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_relocate_locked0_sun4u(void);
extern vm_offset_t md_load(char *, vm_offset_t *);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
const char *cpu_cpuid_prop_sun4u(void);
uint32_t cpu_get_mid_sun4u(void);
static void tlb_init_sun4u(void);

#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;

static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif

static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };

/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
u_int dtlb_slot;
u_int itlb_slot;
static int cpu_impl;
static u_int dtlb_slot_max;
static u_int itlb_slot_max;
static u_int tlb_locked;

static vm_offset_t curkva = 0;
static vm_offset_t heapva;

static phandle_t root;

/*
 * Machine dependent structures that the machine independent
 * loader part uses.
 */
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
	&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
	&netdev,
#endif
#ifdef LOADER_ZFS_SUPPORT
	&zfs_dev,
#endif
	0
};
struct arch_switch archsw;

static struct file_format sparc64_elf = {
	__elfN(loadfile),
	__elfN(exec)
};
struct file_format *file_formats[] = {
	&sparc64_elf,
	0
};

struct fs_ops *file_system[] = {
#ifdef LOADER_UFS_SUPPORT
	&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
	&cd9660_fsops,
#endif
#ifdef LOADER_ZFS_SUPPORT
	&zfs_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
	&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
	&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
	&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
	&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
	&tftp_fsops,
#endif
	0
};
struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
	&ofwnet,
#endif
	0
};

extern struct console ofwconsole;
struct console *consoles[] = {
	&ofwconsole,
	0
};

#ifdef LOADER_DEBUG
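/*
 * Debug helpers: set a physical/virtual data watchpoint by programming the
 * D-MMU watchpoint registers and the LSU control register.
 */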
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_phys_set(vm_offset_t pa, int sz)
{
	u_long off;

	off = (u_long)pa & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}

static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Test for misaligned watch points. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif

/*
 * archsw functions
 */
static int
sparc64_autoload(void)
{

	return (0);
}

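/*
 * Map the destination kernel range on demand through mmu_ops before
 * reading the file data (readin) or copying from loader memory (copyin).
 */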
static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{

	mmu_ops->mmu_mapin(va, len);
	return (read(fd, (void *)va, len));
}

static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{

	mmu_ops->mmu_mapin(dest, len);
	memcpy((void *)dest, src, len);
	return (len);
}

/*
 * other MD functions
 */
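/*
 * Thin wrappers around the OFW client interface "claim", "map" and
 * "release" methods of the mmu and memory nodes; 64-bit physical
 * addresses are passed as two 32-bit cells.
 */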
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
	vm_offset_t mva;

	if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
		return ((vm_offset_t)-1);
	return (mva);
}

static vm_offset_t
alloc_phys(size_t size, int align)
{
	cell_t phys_hi, phys_low;

	if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
	    &phys_hi) == -1)
		return ((vm_offset_t)-1);
	return ((vm_offset_t)phys_hi << 32 | phys_low);
}

static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{

	return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), virt, size, mode));
}

static void
release_phys(vm_offset_t phys, u_int size)
{

	(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
	    (uint32_t)(phys >> 32), size);
}

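/*
 * Start the loaded kernel: find the ELF header saved by the MI ELF code,
 * build the metadata block via md_load(), release the loader heap back to
 * the firmware and jump to the kernel entry point, passing the metadata
 * pointer and the Open Firmware entry as arguments.
 */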
static int
__elfN(exec)(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp;
	Elf_Addr entry;
	Elf_Ehdr *e;
	int error;

	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
		return (EFTYPE);
	e = (Elf_Ehdr *)&fmp->md_data;

	if ((error = md_load(fp->f_args, &mdp)) != 0)
		return (error);

	printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
	pmap_print_tlb_sun4u();
#endif

	dev_cleanup();

	entry = e->e_entry;

	OF_release((void *)heapva, HEAPSZ);

	((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);

	panic("%s: exec returned", __func__);
}

static inline u_long
dtlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

static inline u_long
itlb_get_data_sun4u(u_int tlb, u_int slot)
{
	u_long data, pstate;

	slot = TLB_DAR_SLOT(tlb, slot);
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
	 * work around errata of USIII and beyond.
	 */
	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	(void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
	wrpr(pstate, pstate, 0);
	return (data);
}

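/*
 * Look up the physical address backing va in the locked dTLB (and, in the
 * iTLB variant below, the locked iTLB); returns -1 if there is no mapping.
 */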
static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	u_int i, tlb;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < dtlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
	u_long pstate, reg;
	int i;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);
	for (i = 0; i < itlb_slot_max; i++) {
		reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		if (TLB_TAR_VA(reg) != va)
			continue;
		reg = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		reg >>= TD_PA_SHIFT;
		if (cpu_impl == CPU_IMPL_SPARC64V ||
		    cpu_impl >= CPU_IMPL_ULTRASPARCIII)
			return (reg & TD_PA_CH_MASK);
		return (reg & TD_PA_SF_MASK);
	}
	wrpr(pstate, pstate, 0);
	return (-1);
}

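/*
 * Enter locked TLB mappings through the firmware's SUNW,dtlb-load and
 * SUNW,itlb-load methods.
 */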
static int
dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
	    virt));
}

static int
itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
	    (data & TD_L) != 0)
		panic("%s: won't enter locked TLB entry at index 0 on USIII+",
		    __func__);
	return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
	    virt));
}

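/*
 * If the firmware has left a locked entry in iTLB slot 0, move it to a free
 * slot; itlb_enter_sun4u() refuses to enter locked entries at index 0 on
 * USIII+ (Cheetah+ erratum 34).
 */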
static void
itlb_relocate_locked0_sun4u(void)
{
	u_long data, pstate, tag;
	int i;

	if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
		return;

	pstate = rdpr(pstate);
	wrpr(pstate, pstate & ~PSTATE_IE, 0);

	data = itlb_get_data_sun4u(tlb_locked, 0);
	if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
		wrpr(pstate, pstate, 0);
		return;
	}

	/* Flush the mapping of slot 0. */
	tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
	stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
	    TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
	flush(0);	/* The USIII-family ignores the address. */

	/*
	 * Search for a replacement slot != 0 and enter the data and tag
	 * that were formerly in slot 0.
	 */
	for (i = 1; i < itlb_slot_max; i++) {
		if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
			continue;

		stxa(AA_IMMU_TAR, ASI_IMMU, tag);
		stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
		    data);
		flush(0);	/* The USIII-family ignores the address. */
		break;
	}
	wrpr(pstate, pstate, 0);
	if (i == itlb_slot_max)
		panic("%s: could not find a replacement slot", __func__);
}

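/*
 * Map [va, va + len) using locked 4MB pages: for each 4MB-aligned page not
 * already present in both TLBs, allocate a physical 4MB page, claim the
 * virtual page from the firmware and enter matching locked dTLB and iTLB
 * entries, filling slots downward from the top of the locked TLB.
 */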
static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
	vm_offset_t pa, mva;
	u_long data;
	u_int index;

	if (va + len > curkva)
		curkva = va + len;

	pa = (vm_offset_t)-1;
	len += va & PAGE_MASK_4M;
	va &= ~PAGE_MASK_4M;
	while (len) {
		if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
		    itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
			/* Allocate a physical page, claim the virtual area. */
			if (pa == (vm_offset_t)-1) {
				pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (vm_offset_t)-1)
					panic("%s: out of memory", __func__);
				mva = claim_virt(va, PAGE_SIZE_4M, 0);
				if (mva != va)
					panic("%s: can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    __func__, va, mva);
				/*
				 * The mappings may have changed, be paranoid.
				 */
				continue;
			}
			/*
			 * Actually, we can allocate at most two pages fewer
			 * than the slot limits suggest (depending on the
			 * kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("%s: out of dtlb_slots", __func__);
			if (itlb_slot >= itlb_slot_max)
				panic("%s: out of itlb_slots", __func__);
			data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
			    TD_CV | TD_P | TD_W;
			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			index = dtlb_slot_max - dtlb_slot - 1;
			if (dtlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter dTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			dtlb_slot++;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			index = itlb_slot_max - itlb_slot - 1;
			if (itlb_enter_sun4u(index, data, va) < 0)
				panic("%s: can't enter iTLB slot %d data "
				    "%#lx va %#lx", __func__, index, data,
				    va);
			itlb_slot++;
			pa = (vm_offset_t)-1;
		}
		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}
	if (pa != (vm_offset_t)-1)
		release_phys(pa, PAGE_SIZE_4M);
	return (0);
}

static vm_offset_t
init_heap(void)
{

	/* There is no need for contiguous physical heap memory. */
	heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
	return (heapva);
}

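/*
 * Recursively walk the device tree looking for the "cpu" node whose ID
 * property matches bspid, i.e. the node of the boot processor.
 */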
static phandle_t
find_bsp_sun4u(phandle_t node, uint32_t bspid)
{
	char type[sizeof("cpu")];
	phandle_t child;
	uint32_t cpuid;

	for (; node > 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0) {
			child = find_bsp_sun4u(child, bspid);
			if (child > 0)
				return (child);
		} else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
			    sizeof(cpuid)) <= 0)
				continue;
			if (cpuid == bspid)
				return (node);
		}
	}
	return (0);
}

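/*
 * Name of the OFW property holding the CPU ID for this CPU implementation.
 */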
const char *
cpu_cpuid_prop_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return ("upa-portid");
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return ("portid");
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return ("cpuid");
	default:
		return ("");
	}
}

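/*
 * Read the ID (MID/AID/JID) of the CPU we are running on from the
 * implementation-specific configuration register.
 */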
uint32_t
cpu_get_mid_sun4u(void)
{

	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}

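/*
 * Determine the CPU implementation, select the TLB holding the locked
 * entries, read the TLB slot counts from the boot processor's node and
 * allocate the bookkeeping arrays for the mappings we enter.
 */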
static void
tlb_init_sun4u(void)
{
	phandle_t bsp;

	cpu_impl = VER_IMPL(rdpr(ver));
	switch (cpu_impl) {
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		tlb_locked = TLB_DAR_T32;
		break;
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		tlb_locked = TLB_DAR_T16;
		break;
	case CPU_IMPL_SPARC64V:
		tlb_locked = TLB_DAR_FTLB;
		break;
	}
	bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
	if (bsp == 0)
		panic("%s: no node for bootcpu?!?!", __func__);

	if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("%s: can't get TLB slot max.", __func__);

	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
#ifdef LOADER_DEBUG
		printf("pre fixup:\n");
		pmap_print_tlb_sun4u();
#endif

		/*
		 * Relocate the locked entry in it16 slot 0 (if there is
		 * one) as part of working around Cheetah+ erratum 34.
		 */
		itlb_relocate_locked0_sun4u();

#ifdef LOADER_DEBUG
		printf("post fixup:\n");
		pmap_print_tlb_sun4u();
#endif
	}

	dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL)
		panic("%s: can't allocate TLB store", __func__);
}

#ifdef LOADER_ZFS_SUPPORT
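/*
 * Scan all disks for VTOC8 slices tagged as freebsd-zfs and have the ZFS
 * code probe them for pools; if the boot device contains a pool, make it
 * the current and load device.
 */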
static void
sparc64_zfs_probe(void)
{
	struct vtoc8 vtoc;
	struct zfs_devdesc zfs_currdev;
	char devname[32];
	uint64_t guid;
	int fd, part, unit;

	/* Get the GUID of the ZFS pool on the boot device. */
	guid = 0;
	zfs_probe_dev(getenv("currdev"), &guid);

	for (unit = 0; unit < MAXDEV; unit++) {
		/* Find freebsd-zfs slices in the VTOC. */
		sprintf(devname, "disk%d:", unit);
		fd = open(devname, O_RDONLY);
		if (fd == -1)
			continue;
		lseek(fd, 0, SEEK_SET);
		if (read(fd, &vtoc, sizeof(vtoc)) != sizeof(vtoc)) {
			close(fd);
			continue;
		}
		close(fd);

		for (part = 0; part < 8; part++) {
			if (part == 2 || vtoc.part[part].tag !=
			     VTOC_TAG_FREEBSD_ZFS)
				continue;
			sprintf(devname, "disk%d:%c", unit, part + 'a');
			if (zfs_probe_dev(devname, NULL) == ENXIO)
				break;
		}
	}

	if (guid != 0) {
		zfs_currdev.pool_guid = guid;
		zfs_currdev.root_guid = 0;
		zfs_currdev.d_dev = &zfs_dev;
		zfs_currdev.d_type = zfs_currdev.d_dev->dv_type;
		/* Update the environment for ZFS. */
		env_setenv("currdev", EV_VOLATILE, zfs_fmtdev(&zfs_currdev),
		    ofw_setcurrdev, env_nounset);
		env_setenv("loaddev", EV_VOLATILE, zfs_fmtdev(&zfs_currdev),
		    env_noset, env_nounset);
	}
}
#endif /* LOADER_ZFS_SUPPORT */

int
main(int (*openfirm)(void *))
{
	char bootpath[64];
	char compatible[32];
	struct devsw **dp;

	/*
	 * Tell the Open Firmware functions where to find the OFW gate.
	 */
	OF_init(openfirm);

	archsw.arch_getdev = ofw_getdev;
	archsw.arch_copyin = sparc64_copyin;
	archsw.arch_copyout = ofw_copyout;
	archsw.arch_readin = sparc64_readin;
	archsw.arch_autoload = sparc64_autoload;
#ifdef LOADER_ZFS_SUPPORT
	archsw.arch_zfs_probe = sparc64_zfs_probe;
#endif

	if (init_heap() == (vm_offset_t)-1)
		OF_exit();
	setheap((void *)heapva, (void *)(heapva + HEAPSZ));

	/*
	 * Probe for a console.
	 */
	cons_probe();

	if ((root = OF_peer(0)) == -1)
		panic("%s: can't get root phandle", __func__);
	OF_getprop(root, "compatible", compatible, sizeof(compatible));
	mmu_ops = &mmu_ops_sun4u;

	mmu_ops->tlb_init();

	/*
	 * Set up the current device.
	 */
	OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));

	/*
	 * Sun compatible bootable CD-ROMs have a disk label placed
	 * before the cd9660 data, with the actual filesystem being
	 * in the first partition, while the other partitions contain
	 * pseudo disk labels with embedded boot blocks for different
	 * architectures, which may be followed by UFS filesystems.
	 * The firmware will set the boot path to the partition it
	 * boots from ('f' in the sun4u case), but we want the kernel
	 * to be loaded from the cd9660 fs ('a'), so the boot path
	 * needs to be altered.
	 */
	if (bootpath[strlen(bootpath) - 2] == ':' &&
	    bootpath[strlen(bootpath) - 1] == 'f' &&
	    strstr(bootpath, "cdrom")) {
		bootpath[strlen(bootpath) - 1] = 'a';
		printf("Boot path set to %s\n", bootpath);
	}

	env_setenv("currdev", EV_VOLATILE, bootpath,
	    ofw_setcurrdev, env_nounset);
	env_setenv("loaddev", EV_VOLATILE, bootpath,
	    env_noset, env_nounset);

	/*
	 * Initialize devices.
	 */
	for (dp = devsw; *dp != 0; dp++)
		if ((*dp)->dv_init != 0)
			(*dp)->dv_init();

	printf("\n");
	printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
	printf("(%s, %s)\n", bootprog_maker, bootprog_date);
	printf("bootpath=\"%s\"\n", bootpath);

	/* Give control to the machine independent loader code. */
	interact();
	return (1);
}

COMMAND_SET(heap, "heap", "show heap usage", command_heap);

static int
command_heap(int argc, char *argv[])
{

	mallocstats();
	printf("heap base at %p, top at %p, upper limit at %p\n", heapva,
	    sbrk(0), heapva + HEAPSZ);
	return(CMD_OK);
}

COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);

static int
command_reboot(int argc, char *argv[])
{
	int i;

	for (i = 0; devsw[i] != NULL; ++i)
		if (devsw[i]->dv_cleanup != NULL)
			(devsw[i]->dv_cleanup)();

	printf("Rebooting...\n");
	OF_exit();
}

/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{

	OF_exit();
}

#ifdef LOADER_DEBUG
static const char *const page_sizes[] = {
	"  8k", " 64k", "512k", "  4m"
};

static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{

	printf("%s %s ",
	    page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
	    tag & TD_G ? "G" : " ");
	printf(tte & TD_W ? "W " : "  ");
	printf(tte & TD_P ? "\e[33mP\e[0m " : "  ");
	printf(tte & TD_E ? "E " : "  ");
	printf(tte & TD_CV ? "CV " : "   ");
	printf(tte & TD_CP ? "CP " : "   ");
	printf(tte & TD_L ? "\e[32mL\e[0m " : "  ");
	printf(tte & TD_IE ? "IE " : "   ");
	printf(tte & TD_NFO ? "NFO " : "    ");
	printf("pa=0x%lx va=0x%lx ctx=%ld\n",
	    TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
}

static void
pmap_print_tlb_sun4u(void)
{
	tte_t tag, tte;
	u_long pstate;
	int i;

	pstate = rdpr(pstate);
	for (i = 0; i < itlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = itlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_ITLB_TAG_READ_REG);
		printf("iTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
	for (i = 0; i < dtlb_slot_max; i++) {
		wrpr(pstate, pstate & ~PSTATE_IE, 0);
		tte = dtlb_get_data_sun4u(tlb_locked, i);
		wrpr(pstate, pstate, 0);
		if (!(tte & TD_V))
			continue;
		tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
		    ASI_DTLB_TAG_READ_REG);
		printf("dTLB-%2u: ", i);
		pmap_print_tte_sun4u(tag, tte);
	}
}
#endif