/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/machdep.c 317003 2017-04-16 06:51:06Z mmel $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/ctype.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#if defined(LINUX_BOOT_ABI)
#include <sys/boot.h>
#endif
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/debug_monitor.h>
#include <machine/db_machdep.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/platform.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#ifdef FDT
#include <contrib/libfdt/libfdt.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>

#if __ARM_ARCH >= 6

DB_SHOW_COMMAND(cp15, db_show_cp15)
{
	u_int reg;

	reg = cp15_midr_get();
	db_printf("Cpu ID: 0x%08x\n", reg);
	reg = cp15_ctr_get();
	db_printf("Cache Type: 0x%08x\n", reg);

	reg = cp15_sctlr_get();
	db_printf("Ctrl: 0x%08x\n", reg);
	reg = cp15_actlr_get();
	db_printf("Aux Ctrl: 0x%08x\n", reg);

	reg = cp15_id_pfr0_get();
	db_printf("Processor Feat 0: 0x%08x\n", reg);
	reg = cp15_id_pfr1_get();
	db_printf("Processor Feat 1: 0x%08x\n", reg);
	reg = cp15_id_dfr0_get();
	db_printf("Debug Feat 0: 0x%08x\n", reg);
	reg = cp15_id_afr0_get();
	db_printf("Auxiliary Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr0_get();
	db_printf("Memory Model Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr1_get();
	db_printf("Memory Model Feat 1: 0x%08x\n", reg);
	reg = cp15_id_mmfr2_get();
	db_printf("Memory Model Feat 2: 0x%08x\n", reg);
	reg = cp15_id_mmfr3_get();
	db_printf("Memory Model Feat 3: 0x%08x\n", reg);
	reg = cp15_ttbr_get();
	db_printf("TTB0: 0x%08x\n", reg);
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	u_int reg;

	if (have_addr) {
		cp15_ats1cpr_set(addr);
		reg = cp15_par_get();
		db_printf("Physical address reg: 0x%08x\n", reg);
	} else
		db_printf("show vtop <virt_addr>\n");
}
#endif /* __ARM_ARCH >= 6 */
#endif /* DDB */

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;

#ifdef FDT
static char *loader_envp;

vm_paddr_t pmap_pa;

#if __ARM_ARCH >= 6
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
 * This is the number of L2 page tables required for covering a maximum
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif
#endif

#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

#define CMDLINE_GUARD "FreeBSD:"
uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif
#ifdef MULTIDELAY
static delay_func *delay_impl;
static void *delay_arg;
#endif

static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");

void
board_set_serial(uint64_t serial)
{

	/* snprintf() already NUL-terminates within the given size. */
	snprintf(board_serial, sizeof(board_serial), "%016jx", serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/* Copy the stack first; setting ss_flags before would be clobbered. */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));
	/* Set the mode to enter in the signal handler */
#if __ARM_ARCH >= 7
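	/*
	 * Per the ARM interworking convention, bit 0 of the handler
	 * address selects Thumb state.
	 */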
	if ((register_t)catcher & 1)
		tf->tf_spsr |= PSR_T;
	else
		tf->tf_spsr &= ~PSR_T;
#endif

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
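	/* Covers ARM_NVEC instruction words plus ARM_NVEC data words. */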
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;
#if __ARM_ARCH < 6
	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Enable high vectors in the system control reg (SCTLR).
		 *
		 * Assume the MD caller knows what it's doing here, and really
		 * does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_page_t m;
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
	vector_page_setprot(VM_PROT_READ);
	pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
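	/* Initialize the restartable atomic sequence bounds (pre-ARMv6 atomics). */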
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
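	/* Sleep (typically WFI) only if nothing is runnable; an interrupt wakes us. */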
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not
 * defining a cpu_initclocks() function they get this generic one.  Any
 * platform that needs to do something special can provide its own
 * implementation, which will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);

#ifdef MULTIDELAY
void
arm_set_delay(delay_func *impl, void *arg)
{

	KASSERT(impl != NULL, ("No DELAY implementation"));
	delay_impl = impl;
	delay_arg = arg;
}

void
DELAY(int usec)
{

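	/* A delay implementation must have been registered via arm_set_delay(). */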
	delay_impl(usec, delay_arg);
}
#endif

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

static int
ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v)
{

	if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v))
		return (ENOMEM);
	return (0);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v)
{

	if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v))
		return (ENOMEM);
	return (0);
}

static u_int
ptrace_get_usr_reg(void *cookie, int reg)
{
	int ret;
	struct thread *td = cookie;

	KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)),
	    ("reg is outside range"));

	switch (reg) {
	case ARM_REG_NUM_PC:
		ret = td->td_frame->tf_pc;
		break;
	case ARM_REG_NUM_LR:
		ret = td->td_frame->tf_usr_lr;
		break;
	case ARM_REG_NUM_SP:
		ret = td->td_frame->tf_usr_sp;
		break;
	default:
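		/* r0-r12 are contiguous in the trapframe, so index from tf_r0. */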
		ret = *((register_t *)&td->td_frame->tf_r0 + reg);
		break;
	}

	return (ret);
}

static u_int
ptrace_get_usr_int(void *cookie, vm_offset_t offset, u_int *val)
{
	struct thread *td = cookie;
	u_int error;

	error = ptrace_read_int(td, offset, val);

	return (error);
}

/**
 * This function parses the current instruction opcode and decodes
 * any possible jump (change in PC) which might occur after
 * the instruction is executed.
 *
 * @param     td                Thread structure of analysed task
 * @param     cur_instr         Currently executed instruction
 * @param     alt_next_address  Pointer to the variable where
 *                              the destination address of the
 *                              jump instruction shall be stored.
 *
 * @return    0                 when a jump is possible
 *            EINVAL            otherwise
 */
static int
ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr,
    uint32_t *alt_next_address)
{
	int error;

	if (inst_branch(cur_instr) || inst_call(cur_instr) ||
	    inst_return(cur_instr)) {
		error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc,
		    alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int);

		return (error);
	}

	return (EINVAL);
}

int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;
	int error_alt = EINVAL;	/* preset: fails if the initial read fails */
	uint32_t cur_instr, alt_next = 0;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	KASSERT(td->td_md.md_ptrace_instr_alt == 0,
	    ("Didn't clear alternative single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);

	error = ptrace_read_int(td, td->td_frame->tf_pc,
	    &cur_instr);
	if (error)
		goto out;

	error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE,
	    &td->td_md.md_ptrace_instr);
	if (error == 0) {
		error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE,
		    PTRACE_BREAKPOINT);
		if (error) {
			td->td_md.md_ptrace_instr = 0;
		} else {
			td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
			    INSN_SIZE;
		}
	}

	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
	if (error_alt == 0) {
		error_alt = ptrace_read_int(td, alt_next,
		    &td->td_md.md_ptrace_instr_alt);
		if (error_alt) {
			td->td_md.md_ptrace_instr_alt = 0;
		} else {
			error_alt = ptrace_write_int(td, alt_next,
			    PTRACE_BREAKPOINT);
			if (error_alt)
				td->td_md.md_ptrace_instr_alt = 0;
			else
				td->td_md.md_ptrace_addr_alt = alt_next;
		}
	}

out:
	PROC_LOCK(p);
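	/* Report failure only if neither breakpoint could be planted. */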
	return ((error != 0) && (error_alt != 0));
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	if (td->td_md.md_ptrace_instr != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}

	if (td->td_md.md_ptrace_instr_alt != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr_alt = 0;
	}

	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET) {
		gr[_REG_R0] = 0;
		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
	} else {
		gr[_REG_R0]   = tf->tf_r0;
		gr[_REG_CPSR] = tf->tf_spsr;
	}
	gr[_REG_R1]   = tf->tf_r1;
	gr[_REG_R2]   = tf->tf_r2;
	gr[_REG_R3]   = tf->tf_r3;
	gr[_REG_R4]   = tf->tf_r4;
	gr[_REG_R5]   = tf->tf_r5;
	gr[_REG_R6]   = tf->tf_r6;
	gr[_REG_R7]   = tf->tf_r7;
	gr[_REG_R8]   = tf->tf_r8;
	gr[_REG_R9]   = tf->tf_r9;
	gr[_REG_R10]  = tf->tf_r10;
	gr[_REG_R11]  = tf->tf_r11;
	gr[_REG_R12]  = tf->tf_r12;
	gr[_REG_SP]   = tf->tf_usr_sp;
	gr[_REG_LR]   = tf->tf_usr_lr;
	gr[_REG_PC]   = tf->tf_pc;

	return (0);
}

/*
 * Set machine context.
 *
 * The register set is copied into the trapframe verbatim, including the
 * CPSR; callers such as sys_sigreturn() are responsible for validating
 * the processor mode and interrupt flags first.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td,
    struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
    } */ *uap)
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);

	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused, void *dtb_ptr,
    size_t dtb_size)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
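	/*
	 * The strings above are padded to whole 32-bit words: "kernel"
	 * spans two words, "elf kernel" three.
	 */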
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	if (dtb_ptr != NULL) {
		/* Copy DTB to KVA space and insert it into module chain. */
		lastaddr = roundup(lastaddr, sizeof(int));
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_DTBP;
		fake_preload[i++] = sizeof(uint32_t);
		fake_preload[i++] = (uint32_t)lastaddr;
		memmove((void *)lastaddr, dtb_ptr, dtb_size);
		lastaddr += dtb_size;
		lastaddr = roundup(lastaddr, sizeof(int));
	}
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	init_static_kenv(NULL, 0);

	return (lastaddr);
}

void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}

#if defined(LINUX_BOOT_ABI)

/* Convert the U-Boot command line into FreeBSD kenv and boot options. */
static void
cmdline_set_env(char *cmdline, const char *guard)
{
	char *cmdline_next, *env;
	size_t size, guard_len;
	int i;

	size = strlen(cmdline);
	/* Skip leading spaces. */
	for (; isspace(*cmdline) && (size > 0); cmdline++)
		size--;

	/* Test and remove guard. */
	if (guard != NULL && guard[0] != '\0') {
		guard_len = strlen(guard);
		if (strncasecmp(cmdline, guard, guard_len) != 0)
			return;
		cmdline += guard_len;
		size -= guard_len;
	}

	/* Skip leading spaces. */
	for (; isspace(*cmdline) && (size > 0); cmdline++)
		size--;

	/* Replace ',' with '\0'. */
	/* TODO: implement escaping for ',' character. */
	cmdline_next = cmdline;
	while (strsep(&cmdline_next, ",") != NULL)
		;
	init_static_kenv(cmdline, 0);
	/* Parse boothowto. */
	for (i = 0; howto_names[i].ev != NULL; i++) {
		env = kern_getenv(howto_names[i].ev);
		if (env != NULL) {
			if (strtoul(env, NULL, 10) != 0)
				boothowto |= howto_names[i].mask;
			freeenv(env);
		}
	}
}

vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;
	int size;
	vm_offset_t lastaddr;
#ifdef FDT
	struct fdt_header *dtb_ptr;
	uint32_t dtb_size;
#endif

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is the atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.  Unfortunately, DT-enabled kernels don't use the
	 * board type and U-Boot delivers 0 in r1 for them.
	 */
	if (abp->abp_r0 != 0 || abp->abp_r2 == 0)
		return (0);
#ifdef FDT
	/* Test if r2 points to a valid DTB. */
	dtb_ptr = (struct fdt_header *)abp->abp_r2;
	if (fdt_check_header(dtb_ptr) == 0) {
		dtb_size = fdt_totalsize(dtb_ptr);
		return (fake_preload_metadata(abp, dtb_ptr, dtb_size));
	}
#endif

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)abp->abp_r2;

	if (ATAG_TAG(walker) != ATAG_CORE)
		return (0);

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.high;
			serial <<= 32;
			serial |= walker->u.tag_sn.low;
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			size = ATAG_SIZE(walker) -
			    sizeof(struct arm_lbabi_header);
			size = min(size, LBABI_MAX_COMMAND_LINE);
			strncpy(linux_command_line, walker->u.tag_cmd.command,
			    size);
			linux_command_line[size] = '\0';
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	lastaddr = fake_preload_metadata(abp, NULL, 0);
	cmdline_set_env(linux_command_line, CMDLINE_GUARD);
	return (lastaddr);
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/*
	 * Mask the metadata pointer: it is supposed to be on a page
	 * boundary.  If the first argument (mdp) doesn't point to a valid
	 * address, the boot loader must have passed us something other
	 * than the metadata pointer, so we give up.  Also give up if we
	 * cannot find the metadata section the loader creates that we get
	 * all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return (0);
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return (0);

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	init_static_kenv(loader_envp, 0);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	return (lastaddr);
}
#endif

vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return (lastaddr);
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return (lastaddr);
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp, NULL, 0);

	return (lastaddr);
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, things that depend on the vm system
 * being set up (including access to some SoC's serial ports), about
 * all that can be done in this routine is to copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

int
arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
    u_int (*fetch_reg)(void *, int),
    u_int (*read_int)(void *, vm_offset_t, u_int *))
{
	u_int addr, nregs, offset = 0;
	int error = 0;

	switch ((insn >> 24) & 0xf) {
	case 0x2:	/* add pc, reg1, #value */
	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		if (((insn >> 16) & 0xf) == 15)
			addr += 8;
		if (insn & 0x02000000) {	/* bit 25: immediate form */
			offset = (insn >> 7) & 0x1e;
			offset = (insn & 0xff) << (32 - offset) |
			    (insn & 0xff) >> offset;
		} else {
			offset = fetch_reg(cookie, insn & 0x0f);
			if ((insn & 0x0000ff0) != 0x00000000) {
				if (insn & 0x10)
					nregs = fetch_reg(cookie,
					    (insn >> 8) & 0xf);
				else
					nregs = (insn >> 7) & 0x1f;
				switch ((insn >> 5) & 3) {
				case 0:
					/* lsl */
					offset = offset << nregs;
					break;
				case 1:
					/* lsr */
					offset = offset >> nregs;
					break;
				default:
					break; /* XXX */
				}
			}
		}
		*new_pc = addr + offset;
		return (0);

	case 0xa:	/* b ... */
	case 0xb:	/* bl ... */
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
			addr |= 0xfc000000;
		*new_pc = (pc + 8 + addr);
		return (0);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = fetch_reg(cookie, insn & 0xf);
		addr = pc + 8 + (addr << 2);
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	case 0x1:	/* mov pc, reg */
		*new_pc = fetch_reg(cookie, insn & 0xf);
		return (0);
	case 0x4:
	case 0x5:	/* ldr pc, [reg] */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* ldr pc, [reg, #offset] */
		if (insn & (1 << 24))
			offset = insn & 0xfff;
		if (insn & 0x00800000)
			addr += offset;
		else
			addr -= offset;
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	case 0x8:	/* ldmxx reg, {..., pc} */
	case 0x9:
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
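		/* Parallel popcount of the 16-bit register list (regs loaded). */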
		nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
		nregs = (nregs + (nregs >> 8)) & 0x001f;
		switch ((insn >> 23) & 0x3) {
		case 0x0:	/* ldmda */
			addr = addr - 0;
			break;
		case 0x1:	/* ldmia */
			addr = addr + 0 + ((nregs - 1) << 2);
			break;
		case 0x2:	/* ldmdb */
			addr = addr - 4;
			break;
		case 0x3:	/* ldmib */
			addr = addr + 4 + ((nregs - 1) << 2);
			break;
		}
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	default:
		return (EINVAL);
	}
}

#if __ARM_ARCH >= 6
void
set_stackptrs(int cpu)
{

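	/* Stacks grow down, so point each mode's sp at the top of its region. */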
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif

#ifdef EFI
static void
add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
    int *mrcnt)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz, memory_size;
	int ndesc, i, j;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	*mrcnt = 0;

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	memory_size = 0;
	for (i = 0, j = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		/* Fill the region before advancing j so that mr[0] is used. */
		if (j >= FDT_MEM_REGIONS)
			break;
		mr[j].mr_start = p->md_phys;
		mr[j].mr_size = p->md_pages * PAGE_SIZE;
		memory_size += mr[j].mr_size;
		j++;
	}

	*mrcnt = j;
}
#endif /* EFI */

#ifdef FDT
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (loader_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp);

	for (cp = loader_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

#if __ARM_ARCH < 6
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint64_t memsize;
	uint32_t l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;

	cpuinfo_init();
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

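	/* The L1 translation table must lie on an L1_TABLE_SIZE (16 KB) boundary. */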
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and for the rest of the initial kernel
	 * address space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	dbg_monitor_init();
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#else /* __ARM_ARCH < 6 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
	if (loader_envp == NULL && fdt_get_chosen_bootargs(linux_command_line,
	    LBABI_MAX_COMMAND_LINE) == 0)
		cmdline_set_env(linux_command_line, CMDLINE_GUARD);
#endif

#ifdef EFI
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/* Set stacks for exception handlers. */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	kdb_init();
	return ((void *)STACKALIGN(thread0.td_pcb));
}

#endif /* __ARM_ARCH < 6 */
#endif /* FDT */